def HasDynamicInvoke(self):
"""
Flag indicating if dynamic invocation is supported.
Returns:
bool: True if supported. False otherwise.
"""
from neo.Core.State.ContractState import ContractPropertyState
return self.ContractProperties & ContractPropertyState.HasDynamicInvoke > 0
def _random_crop(image, size):
"""Make a random crop of (`size` x `size`)."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
random_image, bbox = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=1,
scope=None)
bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3)
image = tf.cond(
bad, lambda: _center_crop(_do_scale(image, size), size),
lambda: tf.image.resize_bicubic([random_image], [size, size])[0])
return image
def script_start_type(script):
"""Return the type of block the script begins with."""
if script[0].type.text == 'when @greenFlag clicked':
return HairballPlugin.HAT_GREEN_FLAG
elif script[0].type.text == 'when I receive %s':
return HairballPlugin.HAT_WHEN_I_RECEIVE
elif script[0].type.text == 'when this sprite clicked':
return HairballPlugin.HAT_MOUSE
elif script[0].type.text == 'when %s key pressed':
return HairballPlugin.HAT_KEY
else:
return HairballPlugin.NO_HAT
def utc_strftime(self, format):
"""Format the UTC time using a Python date formatting string.
This internally calls the Python ``strftime()`` routine from the
Standard Library ``time`` module, for which you can find a
quick reference at ``http://strftime.org/``. If this object is
an array of times, then a sequence of strings is returned
instead of a single string.
"""
tup = self._utc_tuple(_half_second)
year, month, day, hour, minute, second = tup
second = second.astype(int)
zero = zeros_like(year)
tup = (year, month, day, hour, minute, second, zero, zero, zero)
if self.shape:
return [strftime(format, item) for item in zip(*tup)]
else:
return strftime(format, tup)
def _get_and_count_containers(self, custom_cgroups=False, healthchecks=False):
"""List all the containers from the API, filter and count them."""
# Querying the size of containers is slow, we don't do it at each run
must_query_size = self.collect_container_size and self._latest_size_query == 0
self._latest_size_query = (self._latest_size_query + 1) % SIZE_REFRESH_RATE
running_containers_count = Counter()
all_containers_count = Counter()
try:
containers = self.docker_util.client.containers(all=True, size=must_query_size)
except Exception as e:
message = "Unable to list Docker containers: {0}".format(e)
self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message=message, tags=self.custom_tags)
raise Exception(message)
else:
self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK, tags=self.custom_tags)
# Create a set of filtered containers based on the exclude/include rules
# and cache these rules in docker_util
self._filter_containers(containers)
containers_by_id = {}
for container in containers:
container_name = DockerUtil.container_name_extractor(container)[0]
container_status_tags = self._get_tags(container, CONTAINER)
all_containers_count[tuple(sorted(container_status_tags))] += 1
if self._is_container_running(container):
running_containers_count[tuple(sorted(container_status_tags))] += 1
# Check if the container is included/excluded via its tags
if self._is_container_excluded(container):
self.log.debug("Container {0} is excluded".format(container_name))
continue
containers_by_id[container['Id']] = container
# grab pid via API if custom cgroups - otherwise we won't find process when
# crawling for pids.
if custom_cgroups or healthchecks:
try:
inspect_dict = self.docker_util.client.inspect_container(container_name)
container['_pid'] = inspect_dict['State']['Pid']
container['health'] = inspect_dict['State'].get('Health', {})
except Exception as e:
self.log.debug("Unable to inspect Docker container: %s", e)
total_count = 0
# TODO: deprecate these 2, they should be replaced by _report_container_count
for tags, count in running_containers_count.iteritems():
total_count += count
self.gauge("docker.containers.running", count, tags=list(tags))
self.gauge("docker.containers.running.total", total_count, tags=self.custom_tags)
total_count = 0
for tags, count in all_containers_count.iteritems():
stopped_count = count - running_containers_count[tags]
total_count += stopped_count
self.gauge("docker.containers.stopped", stopped_count, tags=list(tags))
self.gauge("docker.containers.stopped.total", total_count, tags=self.custom_tags)
return containers_by_id
def process(self, salt_data, token, opts):
'''
Process events and publish data
'''
parts = salt_data['tag'].split('/')
if len(parts) < 2:
return
# TBD: Simplify these conditional expressions
if parts[1] == 'job':
if parts[3] == 'new':
self.process_new_job_event(salt_data)
if salt_data['data']['fun'] == 'grains.items':
self.minions = {}
elif parts[3] == 'ret':
self.process_ret_job_event(salt_data)
if salt_data['data']['fun'] == 'grains.items':
self.process_minion_update(salt_data)
if parts[1] == 'key':
self.process_key_event(salt_data)
if parts[1] == 'presence':
self.process_presence_events(salt_data, token, opts)
def _get_fieldsets_post_form_or_formset(self, request, form, obj=None):
"""
Generic get_fieldsets code, shared by
TranslationAdmin and TranslationInlineModelAdmin.
"""
base_fields = self.replace_orig_field(form.base_fields.keys())
fields = base_fields + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': self.replace_orig_field(fields)})]
def view_status_code(codes):
"""Return the status code, or a random status code if more than one is given
---
tags:
- Status codes
parameters:
- in: path
name: codes
produces:
- text/plain
responses:
100:
description: Informational responses
200:
description: Success
300:
description: Redirection
400:
description: Client Errors
500:
description: Server Errors
"""
if "," not in codes:
try:
code = int(codes)
except ValueError:
return Response("Invalid status code", status=400)
return status_code(code)
choices = []
for choice in codes.split(","):
if ":" not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(":")
try:
choices.append((int(code), float(weight)))
except ValueError:
return Response("Invalid status code", status=400)
code = weighted_choice(choices)
return status_code(code)
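# Usage sketch (assumption, not the project's own helper): the weighted form of
# the path, e.g. "200:3,500:1", picks a status code proportionally to its weight.
# A minimal standalone version of that selection logic looks like this:
import random

def _weighted_choice_sketch(choices):
    """Pick a value from (value, weight) pairs, proportionally to the weights."""
    total = sum(weight for _, weight in choices)
    threshold = random.uniform(0, total)
    running = 0.0
    for value, weight in choices:
        running += weight
        if running >= threshold:
            return value
    return choices[-1][0]

# Example: roughly three out of four calls return 200, the rest return 500.
# _weighted_choice_sketch([(200, 3.0), (500, 1.0)])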
def _parse_launch_error(data):
"""
Parses a LAUNCH_ERROR message and returns a LaunchFailure object.
:type data: dict
:rtype: LaunchFailure
"""
return LaunchFailure(
data.get(ERROR_REASON, None),
data.get(APP_ID),
data.get(REQUEST_ID),
)
def get_channels(self, current=True, publish_only=False, settings=False):
"""
if "current" is false it will return all channels that a user
has published to in the past.
if publish_only is set to true, then return only the channels
that are publishable.
if settings is true, the API will make extra queries to return
the settings for each channel.
"""
if publish_only:
if current:
endpoint = 'channels/setting/publish/current'
else:
endpoint = 'channels/setting/publish'
else:
if current:
endpoint = 'channels/current'
else:
endpoint = 'channels'
result = self._call(endpoint, content_type='application/json', params=dict(settings=settings))
return [Channel(c) for c in result]
def commit_file(self, message, path, content):
"""
Add a new file as blob in the storage, add its tree entry into the index and commit the index.
:param message: str
:param path: str
:param content: str
:return:
"""
if self.git_batch_commit:
self.add_file(path, content)
self.git_batch_commit_messages.append(message)
else:
with self.lock_write():
if self.job_id:
self.read_tree(self.ref_head)
self.add_file(path, content)
return self.commit_index(message)
def list(members, meta=None) -> List: # pylint:disable=redefined-builtin
"""Creates a new list."""
return List( # pylint: disable=abstract-class-instantiated
plist(iterable=members), meta=meta
)
def attach(self, remote_entry):
"""Attach a remote entry to a local entry"""
self.name = remote_entry.name
self.sha = remote_entry.sha
self.url = remote_entry.url
self.author = remote_entry.author
return self
def secgroup_info(call=None, kwargs=None):
'''
Retrieves information for the given security group. Either a name or a
secgroup_id must be supplied.
.. versionadded:: 2016.3.0
name
The name of the security group for which to gather information. Can be
used instead of ``secgroup_id``.
secgroup_id
The ID of the security group for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f secgroup_info opennebula name=my-secgroup
salt-cloud --function secgroup_info opennebula secgroup_id=5
'''
if call != 'function':
raise SaltCloudSystemExit(
'The secgroup_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
secgroup_id = kwargs.get('secgroup_id', None)
if secgroup_id:
if name:
log.warning(
'Both the \'secgroup_id\' and \'name\' arguments were provided. '
'\'secgroup_id\' will take precedence.'
)
elif name:
secgroup_id = get_secgroup_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The secgroup_info function requires either a name or a secgroup_id '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
info = {}
response = server.one.secgroup.info(auth, int(secgroup_id))[1]
tree = _get_xml(response)
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info
def get_item(self, path_, default):
"""Return item or default. In the latter case, the file is updated to store the default.
Arguments:
path_ -- path to item in section/subsection structure. May be either:
["section", "subsection", ...] or
"[/]section/subsection/..." (leading slash is tolerated)
default -- value to return if item is not found
Argument 'default' is also used to determine the type of the data to return:
- str and list: returned as retrieved
- int and float: eval'ed
- bool: parsed
"""
section, path_ = self._get_section(path_)
key = path_[-1]
if key not in section:
self.set_item(path_, default)
return default
xvalue = section[key]
type_ = type(default)
if type_ in (str, list):
return xvalue
elif type_ == bool:
value = True if xvalue == "True" else False if xvalue == "False" else eval(xvalue)
elif type_ in (int, float):
value = type_(xvalue)
elif default is None:
value = None if xvalue == "None" else eval(xvalue)
else:
raise TypeError("Type not supported: {}".format(type_.__name__))
return value
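# Hedged usage sketch: both path forms described in the docstring should be
# equivalent, and the type of `default` determines how the stored value is parsed
# (the `cfg` object and section names below are hypothetical).
#
#   cfg.get_item(["server", "port"], 8080)   # list form; int default -> int value
#   cfg.get_item("/server/port", 8080)       # string form; leading slash tolerated
#   cfg.get_item("server/debug", False)      # "True"/"False" strings parsed to bool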
def _wait_for_read_ready_or_timeout(self, timeout):
"""Returns tuple of whether stdin is ready to read and an event.
If an event is returned, that event is more pressing than reading
bytes on stdin to create a keyboard input event.
If stdin is ready, either there are bytes to read or a SIGTSTP
triggered by dsusp has been received"""
remaining_timeout = timeout
t0 = time.time()
while True:
try:
(rs, _, _) = select.select(
[self.in_stream.fileno()] + self.readers,
[], [], remaining_timeout)
if not rs:
return False, None
r = rs[0] # if there's more than one, get it in the next loop
if r == self.in_stream.fileno():
return True, None
else:
os.read(r, 1024)
if self.queued_interrupting_events:
return False, self.queued_interrupting_events.pop(0)
elif remaining_timeout is not None:
remaining_timeout = max(0, t0 + timeout - time.time())
continue
else:
continue
except select.error:
if self.sigints:
return False, self.sigints.pop()
if remaining_timeout is not None:
remaining_timeout = max(timeout - (time.time() - t0), 0)
def filter_by_type(stmts_in, stmt_type, **kwargs):
"""Filter to a given statement type.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
stmt_type : indra.statements.Statement
The class of the statement type to filter for.
Example: indra.statements.Modification
invert : Optional[bool]
If True, the statements that are not of the given type
are returned. Default: False
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
invert = kwargs.get('invert', False)
logger.info('Filtering %d statements for type %s%s...' %
(len(stmts_in), 'not ' if invert else '',
stmt_type.__name__))
if not invert:
stmts_out = [st for st in stmts_in if isinstance(st, stmt_type)]
else:
stmts_out = [st for st in stmts_in if not isinstance(st, stmt_type)]
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out
def hexdump(src, length=16, sep='.'):
"""
Returns src in hex dump.
From https://gist.github.com/ImmortalPC/c340564823f283fe530b
:param length: Number of bytes per row.
:param sep: For the text part, sep will be used for non-ASCII characters.
:return: The hexdump
"""
result = []
for i in range(0, len(src), length):
sub_src = src[i:i + length]
hexa = ''
for h in range(0, len(sub_src)):
if h == length / 2:
hexa += ' '
h = sub_src[h]
if not isinstance(h, int):
h = ord(h)
h = hex(h).replace('0x', '')
if len(h) == 1:
h = '0' + h
hexa += h + ' '
hexa = hexa.strip(' ')
text = ''
for c in sub_src:
if not isinstance(c, int):
c = ord(c)
if 0x20 <= c < 0x7F:
text += chr(c)
else:
text += sep
result.append(('%08X: %-' + str(length * (2 + 1) + 1) + 's |%s|')
% (i, hexa, text))
return '\n'.join(result)
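# Hedged usage example: hexdump() accepts bytes (or str) and returns a printable
# multi-line dump; with the default length of 16 the output resembles:
#
#   print(hexdump(b"Hello, world!\x00\x01"))
#   # 00000000: 48 65 6c 6c 6f 2c 20 77  6f 72 6c 64 21 00 01    |Hello, world!..|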
def put_multiple(self, packages):
"""put tasks
This method places multiple tasks in the working area and have
the dispatcher execute them.
Parameters
----------
packages : list(callable)
A list of tasks
Returns
-------
list(int)
Package indices assigned by the working area
"""
pkgidxs = [self.workingArea.put_package(p) for p in packages]
logger = logging.getLogger(__name__)
logger.info('submitting {}'.format(
', '.join(['{}'.format(self.workingArea.package_relpath(i)) for i in pkgidxs])
))
runids = self.dispatcher.run_multiple(self.workingArea, pkgidxs)
self.runid_pkgidx_map.update(zip(runids, pkgidxs))
return pkgidxs
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key.
If metadata is supplied, it will replace the
metadata of the source key being copied.
If no metadata is supplied, the source key's
metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the storage
class of the new Key to be
REDUCED_REDUNDANCY regardless of the
storage class of the key being copied.
The Reduced Redundancy Storage (RRS)
feature of S3, provides lower
redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key
will be copied to the destination
key. If False, the destination key
will have the default ACL.
Note that preserving the ACL in the
new key object will require two
additional API calls to S3, one to
retrieve the current ACL and one to
set that ACL on the new object. If
you don't care about the ACL, a value
of False will be significantly more
efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key)
def get_checker_executable(name):
"""Return checker executable in the form of a list of arguments
for subprocess.Popen"""
if programs.is_program_installed(name):
# Checker is properly installed
return [name]
else:
path1 = programs.python_script_exists(package=None,
module=name+'_script')
path2 = programs.python_script_exists(package=None, module=name)
if path1 is not None: # checker_script.py is available
# Checker script is available but has not been installed
# (this may work with pyflakes)
return [sys.executable, path1]
elif path2 is not None: # checker.py is available
# Checker package is available but its script has not been
# installed (this works with pycodestyle but not with pyflakes)
return [sys.executable, path2]
def rc_stats(stats):
"""
reverse complement stats
"""
rc_nucs = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'}
rcs = []
for pos in reversed(stats):
rc = {}
rc['reference frequency'] = pos['reference frequency']
rc['consensus frequency'] = pos['consensus frequency']
rc['In'] = pos['In']
rc['Del'] = pos['Del']
rc['ref'] = rc_nucs[pos['ref']]
rc['consensus'] = (rc_nucs[pos['consensus'][0]], pos['consensus'][1])
for base, stat in list(pos.items()):
if base in rc_nucs:
rc[rc_nucs[base]] = stat
rcs.append(rc)
return rcs
def _get_zk_path_children(self, zk_conn, zk_path, name_for_error):
"""Fetch child nodes for a given Zookeeper path."""
children = []
try:
children = zk_conn.get_children(zk_path)
except NoNodeError:
self.log.info('No zookeeper node at %s', zk_path)
except Exception:
self.log.exception('Could not read %s from %s', name_for_error, zk_path)
return children
def error_mode(self):
"""error mode: enable only errors; no reports, no persistent"""
self._error_mode = True
self.disable_noerror_messages()
self.disable("miscellaneous")
if self._python3_porting_mode:
self.disable("all")
for msg_id in self._checker_messages("python3"):
if msg_id.startswith("E"):
self.enable(msg_id)
config_parser = self.cfgfile_parser
if config_parser.has_option("MESSAGES CONTROL", "disable"):
value = config_parser.get("MESSAGES CONTROL", "disable")
self.global_set_option("disable", value)
else:
self.disable("python3")
self.set_option("reports", False)
self.set_option("persistent", False)
self.set_option("score", False)
def shift(self, time: int) -> 'Interval':
"""Return a new interval shifted by `time` from self
Args:
time: time to be shifted
Returns:
Interval: interval shifted by `time`
"""
return Interval(self._begin + time, self._end + time)
def get_reply(self, method, replyroot):
"""
Process the I{reply} for the specified I{method} by unmarshalling it
into Python object(s).
@param method: The name of the invoked method.
@type method: str
@param replyroot: The reply XML root node received after invoking the
specified method.
@type replyroot: L{Element}
@return: The unmarshalled reply. The returned value is an L{Object} or
a I{list} depending on whether the service returns a single object
or a collection.
@rtype: L{Object} or I{list}
"""
soapenv = replyroot.getChild("Envelope", envns)
soapenv.promotePrefixes()
soapbody = soapenv.getChild("Body", envns)
soapbody = self.multiref.process(soapbody)
nodes = self.replycontent(method, soapbody)
rtypes = self.returned_types(method)
if len(rtypes) > 1:
return self.replycomposite(rtypes, nodes)
if len(rtypes) == 0:
return
if rtypes[0].multi_occurrence():
return self.replylist(rtypes[0], nodes)
if len(nodes):
resolved = rtypes[0].resolve(nobuiltin=True)
return self.unmarshaller().process(nodes[0], resolved)
def get_conventional_standard_structure(
self, international_monoclinic=True):
"""
Gives a structure with a conventional cell according to certain
standards. The standards are defined in Setyawan, W., & Curtarolo,
S. (2010). High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
They basically enforce as much as possible
norm(a1)<norm(a2)<norm(a3)
Returns:
The structure in a conventional standardized cell
"""
tol = 1e-5
struct = self.get_refined_structure()
latt = struct.lattice
latt_type = self.get_lattice_type()
sorted_lengths = sorted(latt.abc)
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1, 2]],
key=lambda k: k['length'])
if latt_type in ("orthorhombic", "cubic"):
# you want to keep the c axis where it is
# to keep the C- settings
transf = np.zeros(shape=(3, 3))
if self.get_space_group_symbol().startswith("C"):
transf[2] = [0, 0, 1]
a, b = sorted(latt.abc[:2])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[2]
elif self.get_space_group_symbol().startswith("A"): #change to C-centering to match Setyawan/Curtarolo convention
transf[2] = [1, 0, 0]
a, b = sorted(latt.abc[1:])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [1, 2]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[0]
else:
for i in range(len(sorted_dic)):
transf[i][sorted_dic[i]['orig_index']] = 1
a, b, c = sorted_lengths
latt = Lattice.orthorhombic(a, b, c)
elif latt_type == "tetragonal":
# find the "a" vectors
# it is basically the vector repeated two times
transf = np.zeros(shape=(3, 3))
a, b, c = sorted_lengths
for d in range(len(sorted_dic)):
transf[d][sorted_dic[d]['orig_index']] = 1
if abs(b - c) < tol and abs(a - c) > tol:
a, c = c, a
transf = np.dot([[0, 0, 1], [0, 1, 0], [1, 0, 0]], transf)
latt = Lattice.tetragonal(a, c)
elif latt_type in ("hexagonal", "rhombohedral"):
# for the conventional cell representation,
# we always show the rhombohedral lattices as hexagonal
# check first if we have the refined structure shows a rhombohedral
# cell
# if so, make a supercell
a, b, c = latt.abc
if np.all(np.abs([a - b, c - b, a - c]) < 0.001):
struct.make_supercell(((1, -1, 0), (0, 1, -1), (1, 1, 1)))
a, b, c = sorted(struct.lattice.abc)
if abs(b - c) < 0.001:
a, c = c, a
new_matrix = [[a / 2, -a * math.sqrt(3) / 2, 0],
[a / 2, a * math.sqrt(3) / 2, 0],
[0, 0, c]]
latt = Lattice(new_matrix)
transf = np.eye(3, 3)
elif latt_type == "monoclinic":
# You want to keep the c axis where it is to keep the C- settings
if self.get_space_group_operations().int_symbol.startswith("C"):
transf = np.zeros(shape=(3, 3))
transf[2] = [0, 0, 1]
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
a = sorted_dic[0]['length']
b = sorted_dic[1]['length']
c = latt.abc[2]
new_matrix = None
for t in itertools.permutations(list(range(2)), 2):
m = latt.matrix
landang = Lattice(
[m[t[0]], m[t[1]], m[2]]).lengths_and_angles
if landang[1][0] > 90:
# if the angle is > 90 we invert a and b to get
# an angle < 90
landang = Lattice(
[-m[t[0]], -m[t[1]], m[2]]).lengths_and_angles
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][2] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif landang[1][0] < 90:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][2] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
# this if is to treat the case
# where alpha == 90 (but we still have a monoclinic sg)
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, 0, c]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
#if not C-setting
else:
# try all permutations of the axis
# keep the ones with the non-90 angle=alpha
# and b<c
new_matrix = None
for t in itertools.permutations(list(range(3)), 3):
m = latt.matrix
landang = Lattice(
[m[t[0]], m[t[1]], m[t[2]]]).lengths_and_angles
if landang[1][0] > 90 and landang[0][1] < landang[0][2]:
landang = Lattice(
[-m[t[0]], -m[t[1]], m[t[2]]]).lengths_and_angles
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][t[2]] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif landang[1][0] < 90 and landang[0][1] < landang[0][2]:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][t[2]] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
# this if is to treat the case
# where alpha == 90 (but we still have a monoclinic sg)
new_matrix = [[sorted_lengths[0], 0, 0],
[0, sorted_lengths[1], 0],
[0, 0, sorted_lengths[2]]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
if international_monoclinic:
# The above code makes alpha the non-right angle.
# The following will convert to proper international convention
# that beta is the non-right angle.
op = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
beta = Lattice(new_matrix).beta
if beta < 90:
op = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
latt = Lattice(new_matrix)
elif latt_type == "triclinic":
#we use a LLL Minkowski-like reduction for the triclinic cells
struct = struct.get_reduced_structure("LLL")
a, b, c = latt.lengths_and_angles[0]
alpha, beta, gamma = [math.pi * i / 180
for i in latt.lengths_and_angles[1]]
new_matrix = None
test_matrix = [[a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
def is_all_acute_or_obtuse(m):
recp_angles = np.array(Lattice(m).reciprocal_lattice.angles)
return np.all(recp_angles <= 90) or np.all(recp_angles > 90)
if is_all_acute_or_obtuse(test_matrix):
transf = np.eye(3)
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, 1, 0],
[0, 0, -1]]
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]]
new_matrix = test_matrix
test_matrix = [[a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[1, 0, 0],
[0, -1, 0],
[0, 0, -1]]
new_matrix = test_matrix
latt = Lattice(new_matrix)
new_coords = np.dot(transf, np.transpose(struct.frac_coords)).T
new_struct = Structure(latt, struct.species_and_occu, new_coords,
site_properties=struct.site_properties,
to_unit_cell=True)
return new_struct.get_sorted_structure()
def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
"""Get entries from the history list.
Parameters
----------
raw : bool
If True, return the raw input.
output : bool
If True, then return the output as well.
hist_access_type : str
'range' (fill in session, start and stop params), 'tail' (fill in n)
or 'search' (fill in pattern param).
session : int
For a range request, the session from which to get lines. Session
numbers are positive integers; negative ones count back from the
current session.
start : int
The first line number of a history range.
stop : int
The final (excluded) line number of a history range.
n : int
The number of lines of history to get for a tail request.
pattern : str
The glob-syntax pattern for a search request.
Returns
-------
The msg_id of the message sent.
"""
content = dict(raw=raw, output=output, hist_access_type=hist_access_type,
**kwargs)
msg = self.session.msg('history_request', content)
self._queue_send(msg)
return msg['header']['msg_id']
def authorized_signup_handler(resp, remote, *args, **kwargs):
"""Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response.
"""
# Remove any previously stored auto register session key
session.pop(token_session_key(remote.name) + '_autoregister', None)
# Store token in session
# ----------------------
# Set token in session - token object only returned if
# current_user.is_authenticated().
token = response_token_setter(remote, resp)
handlers = current_oauthclient.signup_handlers[remote.name]
# Sign-in/up user
# ---------------
if not current_user.is_authenticated:
account_info = handlers['info'](resp)
account_info_received.send(
remote, token=token, response=resp, account_info=account_info
)
user = oauth_get_user(
remote.consumer_key,
account_info=account_info,
access_token=token_getter(remote)[0],
)
if user is None:
# Auto sign-up if user not found
form = create_csrf_disabled_registrationform()
form = fill_form(
form,
account_info['user']
)
user = oauth_register(form)
# if registration fails ...
if user is None:
# requires extra information
session[
token_session_key(remote.name) + '_autoregister'] = True
session[token_session_key(remote.name) +
'_account_info'] = account_info
session[token_session_key(remote.name) +
'_response'] = resp
db.session.commit()
return redirect(url_for(
'.signup',
remote_app=remote.name,
))
# Authenticate user
if not oauth_authenticate(remote.consumer_key, user,
require_existing_link=False):
return current_app.login_manager.unauthorized()
# Link account
# ------------
# Need to store token in database instead of only the session when
# called first time.
token = response_token_setter(remote, resp)
# Setup account
# -------------
if not token.remote_account.extra_data:
account_setup = handlers['setup'](token, resp)
account_setup_received.send(
remote, token=token, response=resp, account_setup=account_setup
)
db.session.commit()
account_setup_committed.send(remote, token=token)
else:
db.session.commit()
# Redirect to next
next_url = get_session_next_url(remote.name)
if next_url:
return redirect(next_url)
return redirect(url_for('invenio_oauthclient_settings.index'))
def songs_delete(self, songs):
"""Delete songs from library.
Parameters:
song (list): A list of song dicts.
Returns:
list: Successfully deleted song IDs.
"""
mutations = [mc_calls.TrackBatch.delete(song['id']) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
# TODO: Report failures.
# failure_ids = [
# res['id']
# for res in response.body['mutate_response']
# if res['response_code'] != 'OK'
# ]
return success_ids
def _run_germline(align_bams, items, ref_file, target, out_file):
"""Run germline calling, handling populations.
TODO:
- We could better handle trio calling with ped inputs as octopus
has special support.
"""
align_bams = " ".join(align_bams)
cores = dd.get_num_cores(items[0])
cmd = ("octopus --threads {cores} --reference {ref_file} --reads {align_bams} "
"--regions-file {target} "
"--working-directory {tmp_dir} "
"-o {tx_out_file} --legacy")
with file_transaction(items[0], out_file) as tx_out_file:
tmp_dir = os.path.dirname(tx_out_file)
do.run(cmd.format(**locals()), "Octopus germline calling")
_produce_compatible_vcf(tx_out_file, items[0])
return out_file
def same_tech(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Usage:
'self.units.same_tech(UnitTypeId.COMMANDCENTER)' or 'self.units.same_tech(UnitTypeId.ORBITALCOMMAND)'
returns all CommandCenter, CommandCenterFlying, OrbitalCommand, OrbitalCommandFlying, PlanetaryFortress
This also works with a set/list/dict parameter, e.g. 'self.units.same_tech({UnitTypeId.COMMANDCENTER, UnitTypeId.SUPPLYDEPOT})'
Untested: This should return the equivalents for Hatchery, WarpPrism, Observer, Overseer, SupplyDepot and others
"""
if isinstance(other, UnitTypeId):
other = {other}
tech_alias_types = set(other)
for unitType in other:
tech_alias = self.game_data.units[unitType.value].tech_alias
if tech_alias:
for same in tech_alias:
tech_alias_types.add(same)
return self.filter(
lambda unit: unit.type_id in tech_alias_types
or unit._type_data.tech_alias is not None
and any(same in tech_alias_types for same in unit._type_data.tech_alias)
)
def target_is_valid(self, target_id=0):
"""
Returns True or False indicating whether or not the specified
target is present and valid.
`target_id` is a target ID (or None for the first target)
"""
try:
target = self._target(target_id=target_id)
except:
return False
return target['state'] != "invalid"
def _match_overlay(self, raster, overlay_spec):
"""
Given a raster or input overlay, generate a list of matched
elements (None if no match) and corresponding tuple of match
strength values.
"""
ordering = [None]*len(overlay_spec) # Elements to overlay
strengths = [0]*len(overlay_spec) # Match strengths
elements = raster.values() if isinstance(raster, Overlay) else [raster]
for el in elements:
for pos in range(len(overlay_spec)):
strength = self._match(el, overlay_spec[pos])
if strength is None: continue # No match
elif (strength <= strengths[pos]): continue # Weaker match
else: # Stronger match
ordering[pos] = el
strengths[pos] = strength
return ordering, strengths
def replace_uid(old_uwnetid, new_uwnetid, no_custom_fields=True):
"""
Change the user's UID from old_uwnetid to new_uwnetid and return the updated BridgeUser objects (custom fields omitted unless requested)
"""
url = author_uid_url(old_uwnetid)
if not no_custom_fields:
url += ("?%s" % CUSTOM_FIELD)
resp = patch_resource(url, '{"user":{"uid":"%[email protected]"}}' % new_uwnetid)
return _process_json_resp_data(resp,
no_custom_fields=no_custom_fields)
def find_first_available_template(self, template_name_list):
"""
Given a list of template names, find the first one that actually exists
and is available.
"""
if isinstance(template_name_list, six.string_types):
return template_name_list
else:
# Take advantage of fluent_pages' internal implementation
return _select_template_name(template_name_list)
def update(self, **kwargs):
'''
Update an existing GraftM package with new sequences and taxonomy. If no
taxonomy is provided, attempt to decorate the new sequences with
pre-existing taxonomy.
Parameters
----------
input_sequence_path: str
Path to FASTA file containing sequences to add to the update GraftM
package
input_taxonomy_path: str
Taxonomy corresponding to the sequences in input_sequence_path. If None,
then attempt to assign taxonomy by decorating the tree made out of all
sequences.
input_graftm_package_path: str
Path to the directory of the GraftM package that is to be updated
output_graftm_package_path: str
Path to the directory to which the new GraftM package will be
written to
'''
input_sequence_path = kwargs.pop('input_sequence_path')
input_taxonomy_path = kwargs.pop('input_taxonomy_path', None)
input_graftm_package_path = kwargs.pop('input_graftm_package_path')
output_graftm_package_path = kwargs.pop('output_graftm_package_path')
threads = kwargs.pop('threads', UpdateDefaultOptions.threads) #TODO: add to user options
if len(kwargs) > 0:
raise Exception("Unexpected arguments detected: %s" % kwargs)
logging.info("Reading previous GraftM package")
old_gpkg = GraftMPackage.acquire(input_graftm_package_path)
min_input_version = 3
if old_gpkg.version < min_input_version:
raise InsufficientGraftMPackageVersion(
"GraftM below version %s cannot be updated using the update function." % min_input_version +
" Unaligned sequences are not included in these packages, therefore no new"
" alignment/HMM/Tree can be created")
new_gpkg = UpdatedGraftMPackage()
new_gpkg.output = output_graftm_package_path
new_gpkg.name = output_graftm_package_path.replace(".gpkg", "")
#######################################
### Collect all unaligned sequences ###
logging.info("Concatenating unaligned sequence files")
new_gpkg.unaligned_sequences = "%s_sequences.fa" % (new_gpkg.name) #TODO: replace hard-coded paths like this with tempfiles
self._concatenate_file([old_gpkg.unaligned_sequence_database_path(),
input_sequence_path],
new_gpkg.unaligned_sequences)
#########################################################
### Parse taxonomy info up front so errors come early ###
if input_taxonomy_path:
logging.info("Reading new taxonomy information")
input_taxonomy = GreenGenesTaxonomy.read_file(input_taxonomy_path)
original_taxonomy_hash = old_gpkg.taxonomy_hash()
total_taxonomy_hash = original_taxonomy_hash.copy()
total_taxonomy_hash.update(input_taxonomy.taxonomy)
num_duplicate_taxonomies = len(total_taxonomy_hash) - \
len(input_taxonomy.taxonomy) - \
len(original_taxonomy_hash)
logging.debug("Found %i taxonomic definitions in common between the previous and updated taxonomies" % num_duplicate_taxonomies)
if num_duplicate_taxonomies > 0:
logging.warn("Found %i taxonomic definitions in common between the previous and updated taxonomies. Using the updated taxonomy in each case." % num_duplicate_taxonomies)
###############################
### Re-construct alignments ###
logging.info("Multiple sequence aligning all sequences")
new_gpkg.aligned_sequences = "%s_mafft_alignment.fa" % (new_gpkg.name)
self._align_sequences(new_gpkg.unaligned_sequences, new_gpkg.aligned_sequences, threads)
########################
### Re-construct HMM ###
logging.info("Creating HMM from alignment")
new_gpkg.hmm = "%s.hmm" % (new_gpkg.name)
new_gpkg.hmm_alignment = "%s_hmm_alignment.fa" % (new_gpkg.name)
self._get_hmm_from_alignment(new_gpkg.aligned_sequences, new_gpkg.hmm, new_gpkg.hmm_alignment)
#########################
### Re-construct tree ###
logging.info("Generating phylogenetic tree")
new_gpkg.unrooted_tree = "%s.tre" % (new_gpkg.name)
new_gpkg.unrooted_tree_log = "%s.tre.log" % (new_gpkg.name)
new_gpkg.package_type, new_gpkg.hmm_length = self._pipe_type(old_gpkg.alignment_hmm_path())
new_gpkg.unrooted_gpkg_tree_log, new_gpkg.unrooted_gpkg_tree = \
self._build_tree(new_gpkg.hmm_alignment, new_gpkg.name,
new_gpkg.package_type, self.fasttree)
##############################################
### Re-root and decorate tree if necessary ###
if input_taxonomy_path:
new_gpkg.gpkg_tree_log = new_gpkg.unrooted_tree_log
new_gpkg.gpkg_tree = new_gpkg.unrooted_gpkg_tree
else:
logging.info("Finding taxonomy for new sequences")
rerooter = Rerooter()
old_tree = Tree.get(path=old_gpkg.reference_package_tree_path(),
schema='newick')
new_tree = Tree.get(path=new_gpkg.unrooted_gpkg_tree,
schema='newick')
old_tree = rerooter.reroot(old_tree)
new_tree = rerooter.reroot(new_tree)
# TODO: Shouldn't call an underscore method, eventually use
# Rerooter instead.
rerooted_tree = rerooter.reroot_by_tree(old_tree, new_tree)
new_gpkg.gpkg_tree = "%s_gpkg.tree" % new_gpkg.name
td = TreeDecorator(
rerooted_tree,
old_gpkg.taxtastic_taxonomy_path(),
old_gpkg.taxtastic_seqinfo_path())
with tempfile.NamedTemporaryFile(suffix='tsv') as taxonomy:
td.decorate(new_gpkg.gpkg_tree, taxonomy.name, True)
total_taxonomy_hash = GreenGenesTaxonomy.read_file(taxonomy.name).taxonomy
################################
### Generating tree log file ###
logging.info("Generating phylogenetic tree log file")
new_gpkg.gpkg_tree = "%s_gpkg.tree" % new_gpkg.name
new_gpkg.gpkg_tree_log = "%s_gpkg.tree.log" % new_gpkg.name
self._generate_tree_log_file(new_gpkg.unrooted_tree,
new_gpkg.hmm_alignment,
new_gpkg.gpkg_tree,
new_gpkg.gpkg_tree_log,
new_gpkg.package_type,
self.fasttree)
################################
### Creating taxtastic files ###
logging.info("Writing new taxonomy files")
new_gpkg.tt_seqinfo = "%s_seqinfo.csv" % new_gpkg.name
new_gpkg.tt_taxonomy = "%s_taxonomy.csv" % new_gpkg.name
gtns = Getaxnseq()
gtns.write_taxonomy_and_seqinfo_files(
total_taxonomy_hash,
new_gpkg.tt_taxonomy,
new_gpkg.tt_seqinfo)
######################
### Compile refpkg ###
logging.info("Compiling pplacer refpkg")
new_gpkg.refpkg = "%s.refpkg" % (new_gpkg.name)
refpkg = self._taxit_create(new_gpkg.name,
new_gpkg.hmm_alignment,
new_gpkg.gpkg_tree,
new_gpkg.gpkg_tree_log,
new_gpkg.tt_taxonomy,
new_gpkg.tt_seqinfo,
new_gpkg.refpkg,
True)
#####################################
### Re-construct diamond database ###
logging.info("Recreating DIAMOND DB")
new_gpkg.diamond_database = "%s.dmnd" % (new_gpkg.name)
self._create_dmnd_database(new_gpkg.unaligned_sequences, new_gpkg.name)
####################
### Compile gpkg ###
logging.info("Compiling GraftM package")
new_gpkg.name = "%s.gpkg" % new_gpkg.name
GraftMPackageVersion3.compile(new_gpkg.name, new_gpkg.refpkg,
new_gpkg.hmm, new_gpkg.diamond_database,
self._define_range(new_gpkg.unaligned_sequences),
new_gpkg.unaligned_sequences,
search_hmm_files=old_gpkg.search_hmm_paths())
###################
### Test it out ###
logging.info("Testing newly updated GraftM package works")
self._test_package(new_gpkg.name)
logging.info("Finished")
def add_record_predicate(self, record_predicate,
code=RECORD_PREDICATE_FALSE,
message=MESSAGES[RECORD_PREDICATE_FALSE],
modulus=1):
"""
Add a record predicate function.
N.B., everything you can do with record predicates can also be done with
record check functions; whether you use one or the other is a matter of
style.
Arguments
---------
`record_predicate` - a function that accepts a single argument (a record
as a dictionary of values indexed by field name) and returns False if
the value is not valid
`code` - problem code to report if a record is not valid, defaults to
`RECORD_PREDICATE_FALSE`
`message` - problem message to report if a record is not valid
`modulus` - apply the check to every nth record, defaults to 1 (check
every record)
"""
assert callable(record_predicate), 'record predicate must be a callable function'
t = record_predicate, code, message, modulus
self._record_predicates.append(t)
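# Hedged usage sketch (the `validator` object and field names are hypothetical):
# a predicate receives the whole record dict and returns False when the record is
# invalid; modulus=10 would apply the check to only every 10th record.
#
#   validator.add_record_predicate(
#       lambda rec: rec.get('start_date', '') <= rec.get('end_date', ''),
#       message='start_date must not be after end_date',
#   )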
def event_handler(event_name):
"""
Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice.
"""
def wrapper(func):
func._event_handler = True
func._handled_event = event_name
return func
return wrapper
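# Hedged usage sketch: the decorator only tags the function; the surrounding
# framework is assumed to discover it via the attributes set above and call it
# with the decoded event body (class and event names below are hypothetical).
#
#   class OrderService:
#       @event_handler('order.created')
#       def on_order_created(self, payload):
#           print('new order received:', payload)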
def _add_token(self, token, parent_node='root'):
"""add a token to this docgraph"""
if parent_node == 'root':
parent_node = self.root
token_node_id = 'token:{}'.format(self.token_count)
self.add_node(token_node_id, layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': token})
self.add_edge(parent_node, token_node_id,
layers={self.ns},
edge_type=EdgeTypes.spanning_relation)
self.tokens.append(token_node_id)
self.token_count += 1
def parse_field_options(self, *options):
"""
Parse the field options query string and return it as a dictionary.
"""
defaults = {}
for option in options:
if isinstance(option, six.text_type):
tokens = [token.strip() for token in option.split(self.view.lookup_sep)]
for token in tokens:
if not len(token.split(":")) == 2:
warnings.warn("The %s token is not properly formatted. Tokens need to be "
"formatted as 'token:value' pairs." % token)
continue
param, value = token.split(":", 1)
if any([k == param for k in ("start_date", "end_date", "gap_amount")]):
if param in ("start_date", "end_date"):
value = parser.parse(value)
if param == "gap_amount":
value = int(value)
defaults[param] = value
return defaults
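# Hedged usage sketch: options are lookup_sep-joined "param:value" tokens.
# Assuming the view's lookup_sep is Django's default "__", a call could look like:
#
#   self.parse_field_options("start_date:2019-01-01__end_date:2019-12-31__gap_amount:7")
#   # -> {'start_date': datetime(2019, 1, 1, 0, 0),
#   #     'end_date': datetime(2019, 12, 31, 0, 0),
#   #     'gap_amount': 7}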
def save_plain_image_as_file(self, filepath, format='png', quality=90):
"""Used for generating thumbnails. Does not include overlaid
graphics.
"""
qimg = self.get_plain_image_as_widget()
qimg.save(filepath, format=format, quality=quality)
def aggregate(self, instance, owner):
"""Given an instance and a class, aggregate together some panglers.
Walks every class in the MRO of the `owner` class, including `owner`,
collecting panglers exposed as `self.attr_name`. The resulting pangler
will be bound to the provided `instance`.
"""
try:
p = self.pangler_factory.from_store(instance, self.id)
except KeyError:
pass
else:
return p
p = self.pangler_factory(self.id)
mro = inspect.getmro(owner)
others = []
for cls in mro:
sub_p = getattr(cls, self.attr_name, None)
if sub_p is None:
continue
others.append(sub_p)
return p.combine(*others).stored_bind(instance)
def myself(self):
"""
Return a :class:`Context` referring to the current process.
"""
return self.context_class(
router=self,
context_id=mitogen.context_id,
name='self',
)
def incoming_phone_numbers(self):
"""
Access the incoming_phone_numbers
:returns: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberList
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberList
"""
if self._incoming_phone_numbers is None:
self._incoming_phone_numbers = IncomingPhoneNumberList(
self._version,
account_sid=self._solution['sid'],
)
return self._incoming_phone_numbers
def alarm_on_segfault(self, alarm):
"""Raise the specified alarm when the segmentation fault handler is executed.
Sends a backtrace.
:param AlarmType|list[AlarmType] alarm: Alarm.
"""
self.register_alarm(alarm)
for alarm in listify(alarm):
self._set('alarm-segfault', alarm.alias, multi=True)
return self._section
def pic_totalremotedischarge_v1(self):
"""Update the receiver link sequence."""
flu = self.sequences.fluxes.fastaccess
rec = self.sequences.receivers.fastaccess
flu.totalremotedischarge = rec.q[0]
def with_color_stripped(f):
"""
A function decorator for applying to `len` or imitators thereof that strips
ANSI color sequences from a string before passing it on. If any color
sequences are not followed by a reset sequence, an `UnterminatedColorError`
is raised.
"""
@wraps(f)
def colored_len(s):
s2 = re.sub(
COLOR_BEGIN_RGX + '(.*?)' + COLOR_END_RGX,
lambda m: re.sub(COLOR_BEGIN_RGX, '', m.group(1)),
s,
)
if re.search(COLOR_BEGIN_RGX, s2):
raise UnterminatedColorError(s)
return f(re.sub(COLOR_END_RGX, '', s2))
return colored_len
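# Hedged usage sketch: wrapping len() gives a length function that ignores
# properly terminated ANSI colour sequences (assuming COLOR_BEGIN_RGX and
# COLOR_END_RGX match standard SGR colour codes).
#
#   colored_len = with_color_stripped(len)
#   colored_len('\x1b[31mred\x1b[0m')   # -> 3 rather than the raw length of 12
#   colored_len('\x1b[31mred')          # -> raises UnterminatedColorError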
def color_conversion_function(start_type, target_type):
"""
Decorator to indicate a function that performs a conversion from one color
space to another.
This decorator will return the original function unmodified, however it will
be registered in the _conversion_manager so it can be used to perform color
space transformations between color spaces that do not have direct
conversion functions (e.g., Luv to CMYK).
Note: For a conversion to/from RGB supply the BaseRGBColor class.
:param start_type: Starting color space type
:param target_type: Target color space type
"""
def decorator(f):
f.start_type = start_type
f.target_type = target_type
_conversion_manager.add_type_conversion(start_type, target_type, f)
return f
return decorator
def memoize(f):
""" Decorator which caches the function's return value each time it is called.
If called later with the same argument, the cached value is returned.
"""
cache = {}
@wraps(f)
def inner(arg):
if arg not in cache:
cache[arg] = f(arg)
return cache[arg]
return inner
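# Usage sketch: the cache is keyed on the single (hashable) argument, so the
# wrapped function is evaluated only once per distinct input.
import time

@memoize
def slow_square(x):
    time.sleep(0.1)   # stand-in for an expensive computation
    return x * x

# slow_square(4)  # slow the first time
# slow_square(4)  # instant: served from the cache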
def write_loom(self, filename: PathLike, write_obsm_varm: bool = False):
"""Write ``.loom``-formatted hdf5 file.
Parameters
----------
filename
The filename.
"""
from .readwrite.write import write_loom
write_loom(filename, self, write_obsm_varm = write_obsm_varm)
def AgregarCertificado(self, tipo_certificado_deposito=None,
nro_certificado_deposito=None,
peso_neto=None,
cod_localidad_procedencia=None,
cod_prov_procedencia=None,
campania=None, fecha_cierre=None,
peso_neto_total_certificado=None,
coe_certificado_deposito=None, # WSLPGv1.6
**kwargs):
"Add the certificate to the grain settlement / certification"
# clean up optional fields:
if not peso_neto_total_certificado:
peso_neto_total_certificado = None # 0 is not a valid value
# coe_certificado_deposito is not used for LPG; unify in future versions
if tipo_certificado_deposito and int(tipo_certificado_deposito) == 332:
if coe_certificado_deposito and long(coe_certificado_deposito):
nro_certificado_deposito = coe_certificado_deposito
coe_certificado_deposito = None
cert = dict(
tipoCertificadoDeposito=tipo_certificado_deposito,
nroCertificadoDeposito=nro_certificado_deposito,
pesoNeto=peso_neto,
codLocalidadProcedencia=cod_localidad_procedencia,
codProvProcedencia=cod_prov_procedencia,
campania=campania,
fechaCierre=fecha_cierre,
pesoNetoTotalCertificado=peso_neto_total_certificado,
coeCertificadoDeposito=coe_certificado_deposito,
coe=coe_certificado_deposito, # WSLPGv1.17
pesoAjustado=peso_neto, # WSLPGv1.17
)
if self.liquidacion:
self.liquidacion['certificados'].append({'certificado': cert})
else:
self.certificacion['retiroTransferencia']['certificadoDeposito'] = cert
return True
def create(cls, name, ncpus=None):
"""Create a Moap instance based on the predictor name.
Parameters
----------
name : str
Name of the predictor (eg. Xgboost, BayesianRidge, ...)
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Returns
-------
moap : Moap instance
moap instance.
"""
try:
return cls._predictors[name.lower()](ncpus=ncpus)
except KeyError:
raise Exception("Unknown class")
def parse_arguments(
argv: Optional[Sequence[str]] = None) -> argparse.Namespace:
"""
Parse the command line arguments.
Args:
argv:
If not ``None``, use the provided command line arguments for
parsing. Otherwise, extract them automatically.
Returns:
The argparse object representing the parsed arguments.
"""
parser = argparse.ArgumentParser(
description='Git credential helper using pass as the data source.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-m', '--mapping',
type=argparse.FileType('r'),
metavar='MAPPING_FILE',
default=None,
help='A mapping file to be used, specifying how hosts '
'map to pass entries. Overrides the default mapping files from '
'XDG config locations, usually: {config_file}'.format(
config_file=DEFAULT_CONFIG_FILE))
parser.add_argument(
'-l', '--logging',
action='store_true',
default=False,
help='Print debug messages on stderr. '
'Might include sensitive information')
parser.add_argument(
'action',
type=str,
metavar='ACTION',
help='Action to preform as specified in the git credential API')
args = parser.parse_args(argv)
return args
def cum_returns(returns, starting_value=0, out=None):
"""
Compute cumulative returns from simple returns.
Parameters
----------
returns : pd.Series, np.ndarray, or pd.DataFrame
Returns of the strategy as a percentage, noncumulative.
- Time series with decimal returns.
- Example::
2015-07-16 -0.012143
2015-07-17 0.045350
2015-07-20 0.030957
2015-07-21 0.004902
- Also accepts two dimensional data. In this case, each column is
cumulated.
starting_value : float, optional
The starting returns.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
cumulative_returns : array-like
Series of cumulative returns.
"""
if len(returns) < 1:
return returns.copy()
nanmask = np.isnan(returns)
if np.any(nanmask):
returns = returns.copy()
returns[nanmask] = 0
allocated_output = out is None
if allocated_output:
out = np.empty_like(returns)
np.add(returns, 1, out=out)
out.cumprod(axis=0, out=out)
if starting_value == 0:
np.subtract(out, 1, out=out)
else:
np.multiply(out, starting_value, out=out)
if allocated_output:
if returns.ndim == 1 and isinstance(returns, pd.Series):
out = pd.Series(out, index=returns.index)
elif isinstance(returns, pd.DataFrame):
out = pd.DataFrame(
out, index=returns.index, columns=returns.columns,
)
return out
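# Hedged usage example: cumulating a short series of simple returns.
#
#   import pandas as pd
#   r = pd.Series([0.01, -0.02, 0.03])
#   cum_returns(r)                       # 0.010000, -0.010200, 0.019494
#   cum_returns(r, starting_value=100)   # 101.0000, 98.9800, 101.949400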
def deploy(self):
'''
Deploy salt-thin
'''
self.shell.send(
self.thin,
os.path.join(self.thin_dir, 'salt-thin.tgz'),
)
self.deploy_ext()
return True
def load(cls, database, doc_id):
"""Load a specific document from the given database.
:param database: the `Database` object to retrieve the document from
:param doc_id: the document ID
:return: the `Document` instance, or `None` if no document with the
given ID was found
"""
doc = database.get(doc_id)
if doc is None:
return None
return cls.wrap(doc)
def verify_arguments(self, args=None, kwargs=None):
"""Ensures that the arguments specified match the signature of the real method.
:raise: ``VerifyingDoubleError`` if the arguments do not match.
"""
args = self.args if args is None else args
kwargs = self.kwargs if kwargs is None else kwargs
try:
verify_arguments(self._target, self._method_name, args, kwargs)
except VerifyingBuiltinDoubleArgumentError:
if doubles.lifecycle.ignore_builtin_verification():
raise
def grid_reload_from_ids(oargrid_jobids):
"""Reload all running or pending jobs of Grid'5000 from their ids
Args:
oargrid_jobids (list): list of ``(site, oar_jobid)`` identifying the
jobs on each site
Returns:
The list of python-grid5000 jobs retrieved
"""
gk = get_api_client()
jobs = []
for site, job_id in oargrid_jobids:
jobs.append(gk.sites[site].jobs[job_id])
return jobs
def fill_notebook(work_notebook, script_blocks):
"""Writes the Jupyter notebook cells
Parameters
----------
script_blocks : list
Each list element should be a tuple of (label, content, lineno).
"""
for blabel, bcontent, lineno in script_blocks:
if blabel == 'code':
add_code_cell(work_notebook, bcontent)
else:
add_markdown_cell(work_notebook, bcontent + '\n')
def validate(
schema: GraphQLSchema,
document_ast: DocumentNode,
rules: Sequence[RuleType] = None,
type_info: TypeInfo = None,
) -> List[GraphQLError]:
"""Implements the "Validation" section of the spec.
Validation runs synchronously, returning a list of encountered errors, or an empty
list if no errors were encountered and the document is valid.
A list of specific validation rules may be provided. If not provided, the default
list of rules defined by the GraphQL specification will be used.
Each validation rule is a ValidationRule object which is a visitor object that holds
a ValidationContext (see the language/visitor API). Visitor methods are expected to
return GraphQLErrors, or lists of GraphQLErrors when invalid.
Optionally a custom TypeInfo instance may be provided. If not provided, one will be
created from the provided schema.
"""
if not document_ast or not isinstance(document_ast, DocumentNode):
raise TypeError("You must provide a document node.")
# If the schema used for validation is invalid, throw an error.
assert_valid_schema(schema)
if type_info is None:
type_info = TypeInfo(schema)
elif not isinstance(type_info, TypeInfo):
raise TypeError(f"Not a TypeInfo object: {inspect(type_info)}")
if rules is None:
rules = specified_rules
elif not isinstance(rules, (list, tuple)):
raise TypeError("Rules must be passed as a list/tuple.")
context = ValidationContext(schema, document_ast, type_info)
# This uses a specialized visitor which runs multiple visitors in parallel,
# while maintaining the visitor skip and break API.
visitors = [rule(context) for rule in rules]
# Visit the whole document with each instance of all provided rules.
visit(document_ast, TypeInfoVisitor(type_info, ParallelVisitor(visitors)))
return context.errors
def pkcs7_unpad(data):
"""
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
"""
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]]
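# Worked example (assumption): in PKCS#7 every padding byte holds the number of
# padding bytes, so the last byte says how many bytes to strip.
#
#   pkcs7_unpad(b'secret msg\x06\x06\x06\x06\x06\x06')   # -> b'secret msg'
#   pkcs7_unpad('secret msg' + '\x06' * 6)                # -> 'secret msg'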
def OnMacroToolbarToggle(self, event):
"""Macro toolbar toggle event handler"""
self.main_window.macro_toolbar.SetGripperVisible(True)
macro_toolbar_info = self.main_window._mgr.GetPane("macro_toolbar")
self._toggle_pane(macro_toolbar_info)
event.Skip()
def edges(self, **kwargs):
"""Get the known catalog edges, formed between two resources.
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function.
:returns: A generating yielding Edges.
:rtype: :class:`pypuppetdb.types.Edge`
"""
edges = self._query('edges', **kwargs)
for edge in edges:
identifier_source = edge['source_type'] + \
'[' + edge['source_title'] + ']'
identifier_target = edge['target_type'] + \
'[' + edge['target_title'] + ']'
yield Edge(source=self.resources[identifier_source],
target=self.resources[identifier_target],
relationship=edge['relationship'],
node=edge['certname']) | 0.004728 |
def _draw_tiles(self, x_offset, y_offset, bg):
"""Render all visible tiles a layer at a time."""
count = 0
for layer_name, c_filters, t_filters in self._get_features():
colour = (self._256_PALETTE[layer_name]
if self._screen.colours >= 256 else self._16_PALETTE[layer_name])
for x, y, z, tile, satellite in sorted(self._tiles.values(), key=lambda k: k[0]):
# Don't draw the wrong type or zoom of tile.
if satellite != self._satellite or z != self._zoom:
continue
# Convert tile location into pixels and draw the tile.
x *= self._size
y *= self._size
if satellite:
count += self._draw_satellite_tile(
tile,
int((x-x_offset + self._screen.width // 4) * 2),
int(y-y_offset + self._screen.height // 2))
else:
count += self._draw_tile_layer(tile, layer_name, c_filters, colour, t_filters,
x - x_offset, y - y_offset, bg)
return count | 0.004992 |
def compartments(self):
"""lists compartments the metabolites are in"""
if self._compartments is None:
self._compartments = {met.compartment for met in self._metabolites
if met.compartment is not None}
return self._compartments | 0.006734 |
def remove(key, val, delimiter=DEFAULT_TARGET_DELIM):
'''
.. versionadded:: 0.17.0
Remove a value from a list in the grains config file
key
The grain key to remove.
val
The value to remove.
delimiter
The key can be a nested dict key. Use this parameter to
specify the delimiter you use, instead of the default ``:``.
You can now append values to a list in nested dictionary grains. If the
list doesn't exist at this level, it will be created.
.. versionadded:: 2015.8.2
CLI Example:
.. code-block:: bash
salt '*' grains.remove key val
'''
grains = get(key, [], delimiter)
if not isinstance(grains, list):
return 'The key {0} is not a valid list'.format(key)
if val not in grains:
return 'The val {0} was not in the list {1}'.format(val, key)
grains.remove(val)
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_grain = get(key, None, delimiter)
if isinstance(_grain, dict):
_grain.update({rest: grains})
grains = _grain
return setval(key, grains) | 0.000868 |
def to_database(self, manager=None):
"""Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
"""
network = pybel.to_database(self.model, manager=manager)
return network | 0.002323 |
def _add_modifiers(self, sql, blueprint, column):
"""
        Add the column modifiers to the definition
"""
for modifier in self._modifiers:
method = '_modify_%s' % modifier
if hasattr(self, method):
sql += getattr(self, method)(blueprint, column)
return sql | 0.005988 |
def init_current_directory():
"""Initialize and create dry config file(s) inside current directory"""
settings_directory=project_path+'/.dry'
settings_file=settings_directory+'/config.py'
if os.path.isdir(settings_directory):
# already initialized
print("directory already initialized.")
return
# init
os.makedirs(settings_directory)
f = open(settings_file,'w')
print(sample_config_file, file=f)
f.close() | 0.02027 |
def _handle_decl_list(self, node, scope, ctxt, stream):
"""Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling decl list")
# just handle each declaration
for decl in node.decls:
self._handle_node(decl, scope, ctxt, stream) | 0.005376 |
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
if not is_unit_test:
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
seconds_remaining = self.heartrate - \
(timezone.utcnow() - job.latest_heartbeat)\
.total_seconds()
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e)) | 0.001418 |
def vamp_1_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None):
""" Computes the VAMP-1 score of a kinetic model.
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
    :math:`C_{00}^{train} = E_t[x_t x_t^T]`
    :math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
    The score is computed from singular vectors U and V evaluated on the test data
    with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vamp1 : float
VAMP-1 score
"""
from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
# SVD of symmetrized operator in empirical distribution
U, S, V = _svd_sym_koopman(K, C00_train, Ctt_train)
if k is not None:
U = U[:, :k]
# S = S[:k][:, :k]
V = V[:, :k]
A = spd_inv_sqrt(mdot(U.T, C00_test, U))
B = mdot(U.T, C0t_test, V)
C = spd_inv_sqrt(mdot(V.T, Ctt_test, V))
# compute trace norm (nuclear norm), equal to the sum of singular values
score = np.linalg.norm(mdot(A, B, C), ord='nuc')
return score | 0.00661 |
def cloud_cover_to_irradiance_clearsky_scaling(self, cloud_cover,
method='linear',
**kwargs):
"""
Estimates irradiance from cloud cover in the following steps:
1. Determine clear sky GHI using Ineichen model and
climatological turbidity.
2. Estimate cloudy sky GHI using a function of
cloud_cover e.g.
:py:meth:`~ForecastModel.cloud_cover_to_ghi_linear`
3. Estimate cloudy sky DNI using the DISC model.
        4. Calculate DHI from DNI and GHI.
Parameters
----------
cloud_cover : Series
Cloud cover in %.
method : str, default 'linear'
Method for converting cloud cover to GHI.
'linear' is currently the only option.
**kwargs
Passed to the method that does the conversion
Returns
-------
irrads : DataFrame
Estimated GHI, DNI, and DHI.
"""
solpos = self.location.get_solarposition(cloud_cover.index)
cs = self.location.get_clearsky(cloud_cover.index, model='ineichen',
solar_position=solpos)
method = method.lower()
if method == 'linear':
ghi = self.cloud_cover_to_ghi_linear(cloud_cover, cs['ghi'],
**kwargs)
else:
raise ValueError('invalid method argument')
dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))
irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
return irrads | 0.002275 |
def _get_consent_id(self, requester, user_id, filtered_attr):
"""
Get a hashed id based on requester, user id and filtered attributes
:type requester: str
:type user_id: str
:type filtered_attr: dict[str, str]
:param requester: The calling requester
:param user_id: The authorized user id
:param filtered_attr: a list containing all attributes to be sent
:return: an id
"""
filtered_attr_key_list = sorted(filtered_attr.keys())
hash_str = ""
for key in filtered_attr_key_list:
_hash_value = "".join(sorted(filtered_attr[key]))
hash_str += key + _hash_value
id_string = "%s%s%s" % (requester, user_id, hash_str)
return urlsafe_b64encode(hashlib.sha512(id_string.encode("utf-8")).hexdigest().encode("utf-8")).decode("utf-8") | 0.003452 |
def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None,
is_training=True):
"""VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
is_training: boolean value indicating if training is happening.
Returns:
    The encoding of the image.
"""
with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse):
# Preprocess input
x *= 256
x = x - COLOR_NORMALIZATION_VECTOR
with arg_scope(vgg.vgg_arg_scope()):
# Padding because vgg_16 accepts images of size at least VGG_IMAGE_SIZE.
x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH],
[0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]])
_, end_points = vgg.vgg_16(
x,
num_classes=enc_final_size,
is_training=is_training)
pool5_key = [key for key in end_points.keys() if 'pool5' in key]
assert len(pool5_key) == 1
enc = end_points[pool5_key[0]]
# Undoing padding.
enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])
enc_shape = enc.get_shape().as_list()
enc_shape[0] = -1
enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3]
enc_flat = tf.reshape(enc, (-1, enc_size))
enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)
enc_flat = tf.layers.dense(
enc_flat,
enc_final_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4,))
if hparams.enc_pred_use_l2norm:
enc_flat = tf.nn.l2_normalize(enc_flat, 1)
return enc_flat | 0.007349 |
def context(self, context):
"""Sets the context that Selenium commands are running in using
a `with` statement. The state of the context on the server is
saved before entering the block, and restored upon exiting it.
:param context: Context, may be one of the class properties
`CONTEXT_CHROME` or `CONTEXT_CONTENT`.
Usage example::
with selenium.context(selenium.CONTEXT_CHROME):
# chrome scope
... do stuff ...
"""
initial_context = self.execute('GET_CONTEXT').pop('value')
self.set_context(context)
try:
yield
finally:
self.set_context(initial_context) | 0.002789 |
def xml_import(self,
xml_fname=None,
xml_content=None,
ns_mapping=None,
embedded_predicate=None,
id_and_revision_extractor=None,
extract_empty_embedded=False,
keep_attrs_in_created_reference=True,
transformer=None):
"""
This is the generic XML import function for dingos. Its parameters
are as follows:
- xml_fname: Filename of the XML file to be read
- xml_content: Alternatively, the xml_content can be provided as string
or as XMLNode (i.e., a piece of XML that has already been parsed)
- ns_mapping: A dictionary that may already contain mappings of namespaces
to namespace URIs. Attention: this dictionary will be enriched with namespace
information found in the XML file!!!
- embedded_predicate:
A function that, when given an XML node and a child node, determines whether
the child node should be treated as separate entity that has been embedded.
Please refer to existing import modules such as for STIX or OpenIOC for
examples of how to use this parameter.
- id_and_revision_extractor:
A function that, when given an XML node and a child node, determines whether
this node specifies an identifier and possibly a timestamp.
Please refer to existing import modules such as for STIX or OpenIOC for
examples of how to use this parameter.
- extract_empty_embedded:
A flag (True/False) governing whether elements that are recognized as
being embedded but contain no childs should be extracted as separate
object or not. The default is False; the setting "True" may be necessary
in cases where there are embedded objects that contain all its information
in attributes rather than using child elements.
- keep_attrs_in_created_reference:
A flag (True/False) governing the shape of the reference created for
an embedded object: when an embedding is recognized, it is extracted
and a reference using 'idref' inserted instead. If 'keep_attrs_in_created_reference'
is True, then the top-level attributes contained in the found embedded object
are also retained in the reference.
- transformer:
A function that, when given an element name and a DingoObjDict containing
the result of importing the contents under the element of the given name,
may or may not change the element name and transform the DingoObjDict.
Please refer to existing import MANTIS modules such as for OpenIOC for
examples of how to use this parameter.
Note: a good starting point for understanding how to use the python bindings
of libxml2 is http://mikekneller.com/kb/python/libxml2python/part1.
"""
generated_id_count = {}
# Fill defaults
if not ns_mapping:
            ns_mapping = {}
if not transformer:
transformer = lambda x, y: (x, y)
# We use the _import_pending_stack to hold extracted embedded objects
# that still need to be processed
_import_pending_stack = deque()
# We collect the read embedded objects in the following list
embedded_objects = deque()
def xml_import_(element, depth=0, type_info=None, inherited_id_and_rev_info=None):
"""
Recursive import function
"""
if not inherited_id_and_rev_info:
inherited_id_and_rev_info = main_id_and_rev_info.copy()
fresh_inherited_id_and_rev_info = inherited_id_and_rev_info.copy()
if element.name == 'comment':
return None
#try:
# namespace = element.ns()
# ns_mapping[namespace.name]=namespace.content
#except:
# pass
result = DingoObjDict()
# Add properties to result dictionary for this element
if element.properties:
for prop in element.properties:
if not prop:
break
if prop.type == 'attribute':
try:
result["@%s:%s" % (prop.ns().name, prop.name)] = prop.content
except:
result["@%s" % prop.name] = prop.content
# see if there is a namespace
try:
ns = element.ns().name
result["@@ns"] = ns
except:
pass
# prepare list for keeping resulting dictionaries of child elements
element_dicts = []
# While looking at the child-elements, we have to keep track
# of certain data.
# Firstly: keep track whether we have seen text content that is not whitespace --
# if that happens, we know that this element contains mixed
# content and we will back off and just dump the element contents
# as is as value into the dictionary.
non_ws_content = False
# Secondly: If the element contains cdata stuff, we will see
# that one (well, the only) child has type cdata. So if
# we find such a child, we set the flag
cdata_content = False
# Thirdly: we keep track of how many different child-element-names
# we have seen.
#
# - If we find that we have exactly one distinct name,
# we will generate a dictionary of form
# {<Element_Name> : [ <list of child elemen dictionaries> ]}
# - If we find that we have as many distinct names as we
# child elements, we create a dictionary mapping each child element
# name to its dictionary representation
# - If we find that we have less child element names than
# we have children, we know that at least one name
# occured more than once. Our dictionary representation cannot
# deal with that, and we back off and dump the contents as they
# are marked as 'xml' content with the '@@type' attribute.
name_set = {}
previous_seen_child = None
double_occurrance = False
element_child_count = 0
child = element.children
while child is not None:
#if child_name=='comment':
# pass
if child.name == 'text':
# If we have non-whitespace content in one of the children,
# we set the non_ws_content flag
content = child.content.strip()
if content != "":
non_ws_content = True
elif child.type == 'cdata':
logger.debug("!!!!FOUND CDATA")
# If one of the children (actually, it should be the only child)
# has type cdata, we know that the parent element contains cdata
# and set the cdata_content flag accordingly
cdata_content = True
else:
# we have found an element, so we recurse into it.
element_child_count += 1
if previous_seen_child and (child.name in name_set) and (not child.name == previous_seen_child):
double_occurrance = True
name_set[child.name] = None
if embedded_predicate:
embedded_ns = embedded_predicate(element, child, ns_mapping)
logger.debug("Embedded ns is %s" % embedded_ns)
if embedded_ns:
inherited_id_and_rev_info = fresh_inherited_id_and_rev_info.copy()
# There is an embedded object. We therefore
# replace the contents of the element with an element
# containing an idref (and, since we might need them,
# all attributes of the embedded element)
if type(embedded_ns) == type({}):
# If necessary, the embedded_predicate can return more information
# than namespace information, namely we can can hand down
                                # id and revision info that has been derived when the embedding
# was detected. For backward compatibility,
# we further allow returning of a string; if, however,
# a dictionary is returned, there is id_and_revision_info.
id_and_revision_info = embedded_ns.get('id_and_revision_info',
id_and_revision_extractor(child))
embedded_ns = embedded_ns.get('embedded_ns',None)
else:
id_and_revision_info = id_and_revision_extractor(child)
# See whether stuff needs to be inherited
if not 'id' in id_and_revision_info or not id_and_revision_info['id']:
if 'id' in inherited_id_and_rev_info:
parent_id = inherited_id_and_rev_info['id']
if parent_id in generated_id_count:
gen_counter = generated_id_count[parent_id]
gen_counter +=1
else:
gen_counter = 0
generated_id_count[parent_id] = gen_counter
(parent_namespace, parent_uid) = parent_id.split(':')
generated_id = "%s:emb%s-in-%s" % (parent_namespace,gen_counter,parent_uid)
logger.info("Found embedded %s without id and generated id %s" % (element.name,generated_id))
id_and_revision_info['id'] = generated_id
id_and_revision_info['id_inherited'] = True
else:
logger.error("Attempt to import object (element name %s) without id -- object is ignored" % elt_name)
#cybox_id = gen_cybox_id(iobject_type_name)
if not id_and_revision_info.get('timestamp', None):
if inherited_id_and_rev_info and 'timestamp' in inherited_id_and_rev_info:
id_and_revision_info['timestamp'] = inherited_id_and_rev_info['timestamp']
id_and_revision_info['ts_inherited'] = True
else:
inherited_id_and_rev_info['timestamp'] = id_and_revision_info['timestamp']
if 'id' in id_and_revision_info:
# If the identifier has no namespace info (this may occur, e.g. for
# embedded OpenIOC in STIX, we take the namespace inherited from the
# embedding object
if (not ':' in id_and_revision_info['id']
and inherited_id_and_rev_info['id']
and ':' in inherited_id_and_rev_info['id']):
id_and_revision_info['id'] = "%s:%s" % (inherited_id_and_rev_info['id'].split(':')[0],
id_and_revision_info['id'])
id_and_revision_info['ns_inherited'] = True
inherited_id_and_rev_info['id'] = id_and_revision_info['id']
if keep_attrs_in_created_reference:
reference_dict = extract_attributes(child, prefix_key_char='@',
dict_constructor=DingoObjDict)
else:
reference_dict = DingoObjDict()
reference_dict['@idref'] = id_and_revision_info['id']
reference_dict['@@timestamp'] = id_and_revision_info['timestamp']
try:
reference_dict['@@ns'] = child.ns().name
except:
reference_dict['@@ns'] = None
if embedded_ns == True:
embedded_ns = None
logger.debug("Setting embedded type info to %s" % embedded_ns)
reference_dict['@@embedded_type_info'] = embedded_ns
element_dicts.append((child.name, reference_dict))
if (child.children or child.content) \
or extract_empty_embedded \
or 'extract_empty_embedded' in id_and_revision_info:
id_and_revision_info['inherited'] = fresh_inherited_id_and_rev_info.copy()
if 'inherited' in id_and_revision_info['inherited']:
for key in id_and_revision_info['inherited']['inherited']:
if not key in id_and_revision_info['inherited']:
id_and_revision_info['inherited'][key] = id_and_revision_info['inherited']['inherited'][key]
del(id_and_revision_info['inherited']['inherited'])
logger.debug(
"Adding XML subtree starting with element %s and type info %s to pending stack." % (
id_and_revision_info, embedded_ns))
_import_pending_stack.append((id_and_revision_info, embedded_ns, child))
else:
# For example, in cybox 1.0, the following occurs::
# <EmailMessageObj:File xsi:type="FileObj:FileObjectType" object_reference="cybox:object-3cf6a958-5c3f-11e2-a06c-0050569761d3"/>
# This is only a reference and may not be confused with the definition of the object,
# which occurs someplace else -- otherwise, the (almost) empty reference is created as object
# and may overwrite the object resulting from the real definition.
logger.info(
"Not adding element %s with type info %s to pending stack because element is empty." % (
id_and_revision_info, embedded_ns))
else:
child_import = xml_import_(child, depth + 1, inherited_id_and_rev_info=inherited_id_and_rev_info)
if child_import:
element_dicts.append(child_import)
else:
child_import = xml_import_(child, depth + 1, inherited_id_and_rev_info=inherited_id_and_rev_info)
if child_import:
element_dicts.append(child_import)
child = child.next
# now, we decide what to do with this node
distinct_child_count = len(name_set.keys())
if distinct_child_count == 0:
# No child elements were detected, so we dump the content into
# the value
result['_value'] = element.content
if cdata_content:
# If this is a cdata element, we mark it as such
result['@@content_type'] = 'cdata'
elif non_ws_content == True:
# We have mixed content, so we dump it
sub_child = element.children
serialization = ''
while sub_child:
serialization += sub_child.serialize()
sub_child = sub_child.next
result['_value'] = serialization.strip()
#result['_value']=element.serialize()
result['@@content_type'] = 'mixed'
elif double_occurrance: # distinct_child_count >1 and (distinct_child_count) < element_child_count:
# We have a structure our dictionary representation cannot
# deal with -- so we dump it
logger.warning("Cannot deal with XML structure of %s (children %s, count %s): will dump to value" % (
element.name, name_set.keys(), element_child_count))
sub_child = element.children
serialization = ''
while sub_child:
serialization += sub_child.serialize()
sub_child = sub_child.next
result['_value'] = serialization.strip()
#result['_value']=element.serialize()
result['@@content_type'] = 'xml'
else:
previously_written_name = None
for (name, element_dict) in element_dicts:
if not previously_written_name or name != previously_written_name:
result[name] = element_dict
previously_written_name = name
else: # if name == previously_written_name:
if type(result[name]) == type([]):
result[name].append(element_dict)
else:
result[name] = [result[name], element_dict]
if type_info:
result['@@embedded_type_info'] = type_info
element_ns = None
try:
element_ns = element.ns().name
except:
pass
return transformer(element.name, result)
if xml_content:
if isinstance(xml_content,libxml2.xmlNode):
root = xml_content
else:
doc = libxml2.parseDoc(xml_content)
root = doc.getRootElement()
else:
doc = libxml2.recoverFile(xml_fname)
root = doc.getRootElement()
with open(xml_fname, 'r') as content_file:
xml_content = content_file.read()
# Extract namespace information (if any)
try:
ns_def = root.nsDefs()
while ns_def:
ns_mapping[ns_def.name] = ns_def.content
ns_def = ns_def.next
except:
pass
# Extract ID and timestamp for root element
main_id_and_rev_info = id_and_revision_extractor(root)
# Call the internal recursive function. This returns
# - name of the top-level element
# - DingoObjDict resulting from import
# As side effect, it pushes the XML nodes of
# found embedded objects onto the pending stack
(main_elt_name, main_elt_dict) = xml_import_(root, 0)
# We now go through the pending stack.
# For each found embedded object, xml_import_ pushes
# the following triple on the stack:
# - id_and_revision_info: A dictionary, containing
# identifier and (possibly) timestamp information
# for that object
# - type_info: Information about the type of the
# embedded object (can be None)
# - the XML node that describes the embedded object
do_not_process_list = []
while _import_pending_stack:
(id_and_revision_info, type_info, elt) = _import_pending_stack.pop()
if 'defer_processing' in id_and_revision_info:
do_not_process_list.append((id_and_revision_info,type_info,elt))
else:
(elt_name, elt_dict) = xml_import_(elt, 0,
type_info=type_info,
inherited_id_and_rev_info=id_and_revision_info.copy())
embedded_objects.append({'id_and_rev_info': id_and_revision_info,
'elt_name': elt_name,
'dict_repr': elt_dict})
result= {'id_and_rev_info': main_id_and_rev_info,
'elt_name': main_elt_name,
'dict_repr': main_elt_dict,
'embedded_objects': embedded_objects,
'unprocessed' : do_not_process_list,
'file_content': xml_content}
#pp.pprint(result)
return result | 0.006835 |
def add_interceptor(self, interceptor):
"""
        Adds an interceptor for this map. The added interceptor will intercept operations and execute user-defined methods.
        :param interceptor: (object), interceptor for the map which includes user defined methods.
        :return: (str), id of the registered interceptor.
"""
return self._encode_invoke(map_add_interceptor_codec, interceptor=self._to_data(interceptor)) | 0.011416 |
def kv_format_object(o, keys=None, separator=DEFAULT_SEPARATOR):
"""Formats an object's attributes. Useful for object representation
implementation. Will skip methods or private attributes.
For more details see :func:`kv_format`.
:param o:
Object to format.
:param collections.Sequence keys:
Explicit list of attributes to format. ``None`` means all public
visible attribute for the given object will be formatted.
:param str separator:
Value between two pairs.
:return:
Formatted Object attributes.
:rtype:
:data:`six.text_type <six:six.text_type>`
"""
if keys is None:
key_values = []
for k, v in ((x, getattr(o, x)) for x in sorted(dir(o))):
if k.startswith('_') or isroutine(v):
continue
key_values += (k, v),
else:
key_values = ((k, getattr(o, k)) for k in keys)
return kv_format_pairs(key_values, separator) | 0.001021 |
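# Hypothetical usage sketch: format the public attributes of a simple object.
# The exact pair syntax of the output depends on kv_format_pairs, which is not
# shown here, so the expected strings in the comments are only indicative.
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(kv_format_object(Point(1, 2)))                # e.g. "x=1, y=2"
print(kv_format_object(Point(1, 2), keys=("y",)))   # only the listed attribute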
def pexpire_at(self, _time):
""" Sets the expiration time of :prop:key_prefix to @_time
@_time: absolute Unix timestamp (milliseconds
since January 1, 1970)
"""
return self._client.pexpireat(self.key_prefix, round(_time)) | 0.007326 |
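# Usage sketch, assuming `cache` is an instance of the class defining
# pexpire_at and is backed by a live Redis connection: expire the key
# one hour from now, expressed in milliseconds since the epoch.
import time

cache.pexpire_at((time.time() + 3600) * 1000)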
def get_application_choices():
"""
Get the select options for the application selector
:return:
"""
result = []
keys = set()
for ct in ContentType.objects.order_by('app_label', 'model'):
try:
if issubclass(ct.model_class(), TranslatableModel) and ct.app_label not in keys:
result.append(('{}'.format(ct.app_label), '{}'.format(ct.app_label.capitalize())))
keys.add(ct.app_label)
except TypeError:
continue
return result | 0.005725 |
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credentials = None
if self._cache:
json = self._cache.get(self._key_name)
if json:
credentials = client.Credentials.new_from_json(json)
if credentials is None:
entity = self._get_entity()
if entity is not None:
credentials = getattr(entity, self._property_name)
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
if credentials and hasattr(credentials, 'set_store'):
credentials.set_store(self)
return credentials | 0.00271 |
def get_unaligned_lines(self):
"""get the lines that are not aligned"""
sys.stderr.write("error unimplemented get_unaligned_lines\n")
sys.exit()
return [self._lines[x-1] for x in self._unaligned] | 0.004739 |
def convert_array(array):
"""
Converts an ARRAY string stored in the database back into a Numpy array.
Parameters
----------
array: ARRAY
The array object to be converted back into a Numpy array.
Returns
-------
array
The converted Numpy array.
"""
out = io.BytesIO(array)
out.seek(0)
return np.load(out) | 0.002667 |
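# Companion sketch (not shown in the original): the inverse adapter that
# serializes a NumPy array before storage, plus registration of both
# directions with sqlite3. Connections must then be opened with
# detect_types=sqlite3.PARSE_DECLTYPES for the converter to be applied.
import io
import sqlite3

import numpy as np

def adapt_array(array):
    """Serialize a NumPy array to bytes for storage in an ARRAY column."""
    out = io.BytesIO()
    np.save(out, array)
    out.seek(0)
    return sqlite3.Binary(out.read())

sqlite3.register_adapter(np.ndarray, adapt_array)
sqlite3.register_converter("ARRAY", convert_array)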
def format_dapi_score(cls, meta, offset):
'''Format the line with DAPI user rating and number of votes'''
        if 'average_rank' in meta and 'rank_count' in meta:
label = (cls._nice_strings['average_rank'] + ':').ljust(offset + 2)
score = cls._format_field(meta['average_rank'])
votes = ' ({num} votes)'.format(num=meta['rank_count'])
return label + score + votes
else:
return '' | 0.004444 |
def getSNPSetsList() :
"""Return the names of all imported snp sets"""
import rabaDB.filters as rfilt
f = rfilt.RabaQuery(SNPMaster)
names = []
for g in f.iterRun() :
names.append(g.setName)
return names | 0.047393 |
def not_equal(lhs, rhs):
"""Returns the result of element-wise **not equal to** (!=) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if corresponding elements are different,
otherwise return 0(false).
Equivalent to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (z == y).asnumpy()
array([[ 1., 0.],
[ 0., 1.]], dtype=float32)
>>> (x != 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x != y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.not_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z != y).asnumpy()
array([[ 0., 1.],
[ 1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_not_equal,
lambda x, y: 1 if x != y else 0,
_internal._not_equal_scalar,
None) | 0.002567 |
def write(self, s):
"""
Write wrapper.
Parameters
----------
s : bytes
Bytes to write
"""
try:
return self.handle.write(s)
except OSError:
print()
print("Piksi disconnected")
print()
raise IOError | 0.006042 |
def channels_leave(self, room_id, **kwargs):
"""Causes the callee to be removed from the channel."""
return self.__call_api_post('channels.leave', roomId=room_id, kwargs=kwargs) | 0.015544 |
def write_config(config, app_dir, filename='configuration.json'):
"""Write configuration to the applicaiton directory."""
path = os.path.join(app_dir, filename)
with open(path, 'w') as f:
json.dump(
config, f, indent=4, cls=DetectMissingEncoder,
separators=(',', ': ')) | 0.003195 |
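# Usage sketch (paths and keys are illustrative): persist a small settings
# dictionary as /tmp/myapp/configuration.json using the default filename.
# The application directory is assumed to exist already.
config = {"name": "demo", "debug": True}
write_config(config, "/tmp/myapp")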
def process_non_raw_string_token(self, prefix, string_body, start_row):
'''
check for bad escapes in a non-raw string.
prefix: lowercase string of eg 'ur' string prefix markers.
string_body: the un-parsed body of the string, not including the quote
marks.
start_row: integer line number in the source.
'''
if 'u' in prefix:
if string_body.find('\\0') != -1:
self.add_message('null-byte-unicode-literal', line=start_row) | 0.003906 |
def face_adjacency_angles(self):
"""
Return the angle between adjacent faces
Returns
--------
adjacency_angle : (n,) float
Angle between adjacent faces
Each value corresponds with self.face_adjacency
"""
pairs = self.face_normals[self.face_adjacency]
angles = geometry.vector_angle(pairs)
return angles | 0.005063 |
def io_check(*args, func=None):
"""Check if arguments are file-like object."""
func = func or inspect.stack()[2][3]
for var in args:
if not isinstance(var, io.IOBase):
name = type(var).__name__
raise IOObjError(
f'Function {func} expected file-like object, {name} got instead.') | 0.005917 |
def _image_summary(self, tf_name, images, step=None):
"""
Log a list of images.
References:
https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py#L22
Example:
>>> tf_name = 'foo'
>>> value = ([0, 1, 2, 3, 4, 5], [1, 20, 10, 22, 11])
>>> self = Logger(None, is_dummy=True)
>>> images = [np.random.rand(10, 10), np.random.rand(10, 10)]
>>> summary = self._image_summary(tf_name, images, step=None)
>>> assert len(summary.value) == 2
>>> assert summary.value[0].image.width == 10
"""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = summary_pb2.Summary.Image(
encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1]
)
# Create a Summary value
img_value = summary_pb2.Summary.Value(tag='{}/{}'.format(tf_name, i),
image=img_sum)
img_summaries.append(img_value)
summary = summary_pb2.Summary()
summary.value.add(tag=tf_name, image=img_sum)
summary = summary_pb2.Summary(value=img_summaries)
return summary | 0.002559 |
def _CreateDictReader(self, line_reader):
"""Returns a reader that processes each row and yields dictionaries.
csv.DictReader does this job well for single-character delimiters; parsers
that need multi-character delimiters need to override this method.
Args:
line_reader (iter): yields lines from a file-like object.
Returns:
iter: a reader of dictionaries, as returned by csv.DictReader().
"""
delimiter = self.DELIMITER
quotechar = self.QUOTE_CHAR
magic_test_string = self._MAGIC_TEST_STRING
# Python 3 csv module requires arguments to constructor to be of type str.
if py2to3.PY_3:
delimiter = delimiter.decode(self._encoding)
quotechar = quotechar.decode(self._encoding)
magic_test_string = magic_test_string.decode(self._encoding)
return csv.DictReader(
line_reader, delimiter=delimiter, fieldnames=self.COLUMNS,
quotechar=quotechar, restkey=magic_test_string,
restval=magic_test_string) | 0.004008 |
def error(self, msg):
"""Callback run when a recoverable parsing error occurs"""
self._error = True
self._progress.printMsg('XML parse error: %s' % msg, error=True) | 0.010582 |
def about(self):
"""Create About Spyder dialog with general information."""
versions = get_versions()
# Show Git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
msgBox = QMessageBox(self)
msgBox.setText(
"""
<b>Spyder {spyder_ver}</b> {revision}
<br>The Scientific Python Development Environment |
<a href="{website_url}">Spyder-IDE.org</a>
<br>Copyright © 2009-2019 Spyder Project Contributors and
<a href="{github_url}/blob/master/AUTHORS.txt">others</a>
<br>Distributed under the terms of the
<a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>.
<p>Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
<br>Developed by the
<a href="{github_url}/graphs/contributors">international
Spyder community</a>.
<br>Many thanks to all the Spyder beta testers and dedicated users.
<p>For help with Spyder errors and crashes, please read our
<a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
reports and feature requests, visit our
<a href="{github_url}">Github site</a>.
For project discussion, see our
<a href="{forum_url}">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development.
The popular Python distributions
<a href="https://www.anaconda.com/download/">Anaconda</a> and
<a href="https://winpython.github.io/">WinPython</a>
also contribute to this plan.
<p>Python {python_ver} {bitness}-bit | Qt {qt_ver} |
{qt_api} {qt_api_ver} | {os_name} {os_ver}
<small><p>Certain source files under other compatible permissive
licenses and/or originally by other authors.
Spyder 3 theme icons derived from
<a href="https://fontawesome.com/">Font Awesome</a> 4.7
(© 2016 David Gandy; SIL OFL 1.1) and
<a href="http://materialdesignicons.com/">Material Design</a>
(© 2014 Austin Andrews; SIL OFL 1.1).
Most Spyder 2 theme icons sourced from the
<a href="https://www.everaldo.com">Crystal Project iconset</a>
(© 2006-2007 Everaldo Coelho; LGPL 2.1+).
Other icons from
<a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a>
(© 2013 Yusuke Kamiyamane; CC-BY 3.0),
the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam
Silk icon set</a> 1.3 (© 2006 Mark James; CC-BY 2.5), and
the <a href="https://www.kde.org/">KDE Oxygen icons</a>
(© 2007 KDE Artists; LGPL 3.0+).</small>
<p>See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a>
file for full legal information.
"""
.format(spyder_ver=versions['spyder'],
revision=revlink,
website_url=__website_url__,
github_url=__project_url__,
trouble_url=__trouble_url__,
forum_url=__forum_url__,
python_ver=versions['python'],
bitness=versions['bitness'],
qt_ver=versions['qt'],
qt_api=versions['qt_api'],
qt_api_ver=versions['qt_api_ver'],
os_name=versions['system'],
os_ver=versions['release'])
)
msgBox.setWindowTitle(_("About %s") % "Spyder")
msgBox.setStandardButtons(QMessageBox.Ok)
from spyder.config.gui import is_dark_interface
if PYQT5:
if is_dark_interface():
icon_filename = "spyder.svg"
else:
icon_filename = "spyder_dark.svg"
else:
if is_dark_interface():
icon_filename = "spyder.png"
else:
icon_filename = "spyder_dark.png"
app_icon = QIcon(get_image_path(icon_filename))
msgBox.setIconPixmap(app_icon.pixmap(QSize(64, 64)))
msgBox.setTextInteractionFlags(
Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse)
msgBox.exec_() | 0.000421 |
def flattening(self,R,z):
"""
NAME:
flattening
PURPOSE:
calculate the potential flattening, defined as sqrt(fabs(z/R F_R/F_z))
INPUT:
R - Galactocentric radius (can be Quantity)
z - height (can be Quantity)
OUTPUT:
flattening
HISTORY:
2012-09-13 - Written - Bovy (IAS)
"""
return nu.sqrt(nu.fabs(z/R*self.Rforce(R,z,use_physical=False)\
/self.zforce(R,z,use_physical=False))) | 0.035938 |
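# Worked example sketch, assuming galpy is installed and using its natural
# units: the flattening of a Miyamoto-Nagai disk evaluated slightly above
# the plane at the solar circle (R=1).
from galpy.potential import MiyamotoNagaiPotential

mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
print(mp.flattening(1., 0.125))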
def on_edit(request, page_name):
"""Edit the current revision of a page."""
change_note = error = ""
revision = (
Revision.query.filter(
(Page.name == page_name) & (Page.page_id == Revision.page_id)
)
.order_by(Revision.revision_id.desc())
.first()
)
if revision is None:
page = None
else:
page = revision.page
if request.method == "POST":
text = request.form.get("text")
if request.form.get("cancel") or revision and revision.text == text:
return redirect(href(page.name))
elif not text:
error = "You cannot save empty revisions."
else:
change_note = request.form.get("change_note", "")
if page is None:
page = Page(page_name)
session.add(page)
session.add(Revision(page, text, change_note))
session.commit()
return redirect(href(page.name))
return Response(
generate_template(
"action_edit.html",
revision=revision,
page=page,
new=page is None,
page_name=page_name,
change_note=change_note,
error=error,
)
) | 0.000797 |