text (string, length 78–104k) | score (float64, 0–0.18)
---|---|
def responds(status=status.HTTP_200_OK,
meaning='Undocumented status code',
schema=None,
schema_name=None,
**kwargs):
"""Documents the status code per handled case.
Additional parameters may make it into the OpenAPI documentation
per view. Examples of those parameters include
examples={'application/json': <example>}. As schemata are needed
in order to render the examples in the Web UI, an error will be
signaled if examples= are provided without a schema= parameter.
Schemas can be easily built using a specific syntax.
TODO: Document the syntax here
"""
# TODO: Document syntax in above docstring
if status is None:
status = 'default'
obj = {}
obj['description'] = meaning
if schema:
obj['schema'] = parse_schema(schema)
if schema_name:
obj['schema_name'] = schema_name
obj.update(kwargs)
def decorator(func):
# We do not wrap the function in a new one; we simply modify it
# in place so that it carries the attribute we will look for later.
if not hasattr(func, '_responses'):
func._responses = {}
func._responses[status] = obj
return func
return decorator | 0.000778 |
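A minimal usage sketch (the view name is hypothetical, and plain integer status codes stand in for the framework constants used above):

@responds(200, 'User found', schema_name='User')
@responds(404, 'User does not exist')
def get_user_view(request, pk):
    return {'id': pk}

# Both status codes are now recorded on the view for the OpenAPI generator.
assert set(get_user_view._responses) == {200, 404}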
def get_user_language(user):
""" Simple helper that will fire django signal in order to get User language possibly given by other part of application.
:param user:
:return: string or None
"""
return_value = {}
user_language.send(sender=user, user=user, return_value=return_value)
return return_value.get('language') | 0.005831 |
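A hedged sketch of the other side of that signal: a receiver that fills in the shared return_value dict. It assumes user_language is a django.dispatch.Signal defined alongside the helper above; the profile attribute is illustrative.

from django.dispatch import receiver

@receiver(user_language)
def provide_language_from_profile(sender, user, return_value, **kwargs):
    profile = getattr(user, 'profile', None)
    if profile is not None and getattr(profile, 'language', None):
        return_value['language'] = profile.language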
def predictions_iter(self):
""" property decorated prediction iterator
Returns
-------
iterator : iterator
iterator on prediction sensitivity vectors (matrix)
"""
for fname in self.forecast_names:
yield self.predictions.get(col_names=fname) | 0.006369 |
def watch_file(path, func, *args, **kwargs):
"""
Watch a file for changes by polling its last modification time. Call the
provided function with *args and **kwargs upon modification.
"""
if not path:
raise ValueError('Please specify a file to watch')
print('Watching "{}" for changes'.format(path))
last_modification_time = os.path.getmtime(path)
try:
while True:
time.sleep(1)
new_modification_time = os.path.getmtime(path)
if new_modification_time == last_modification_time:
continue
func(*args, **kwargs)
last_modification_time = new_modification_time
except KeyboardInterrupt:
pass | 0.001387 |
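A short usage sketch; the file name and callback are illustrative:

def rebuild():
    print('settings.yaml changed, rebuilding...')

watch_file('settings.yaml', rebuild)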
def apply_mtd(self, mtd, *args, cont=False, tag=None, **kwargs):
"""Call the method `mtd` on both sides of the equation
That is, the left-hand-side and right-hand-side are replaced by::
lhs=lhs.<mtd>(*args, **kwargs)
rhs=rhs.<mtd>(*args, **kwargs)
The `cont` and `tag` parameters are as in :meth:`apply`.
"""
new_lhs = getattr(self.lhs, mtd)(*args, **kwargs)
if new_lhs == self.lhs and cont:
new_lhs = None
new_rhs = getattr(self.rhs, mtd)(*args, **kwargs)
new_tag = tag
return self._update(new_lhs, new_rhs, new_tag, cont) | 0.003155 |
def as_dict(self):
"""
Return the dependencies as a dictionary.
Returns:
dict: dictionary of dependencies.
"""
return {
'name': str(self),
'modules': [m.as_dict() for m in self.modules],
'packages': [p.as_dict() for p in self.packages]
} | 0.005988 |
def post(self, url, body=None):
"""Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body"""
response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status not in (200, 201, 204):
self.raise_http_error(response)
self._url = response.getheader('Location')
if response.status in (200, 201):
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml)) | 0.004525 |
def call_later(self, delay, callback):
"""Schedule a one-shot timeout given delay seconds.
This method is only useful for compatibility with older versions of pika.
Args:
delay (float): Non-negative number of seconds from now until
expiration
callback (method): The callback method, having the signature
`callback()`
"""
if hasattr(self._connection.ioloop, "call_later"):
self._connection.ioloop.call_later(delay, callback)
else:
self._connection.ioloop.add_timeout(delay, callback) | 0.004902 |
def set_published_date(self):
"""Parses published date and set value"""
try:
self.published_date = self.soup.find('pubdate').string
except AttributeError:
self.published_date = None | 0.008734 |
def from_floats(red, green, blue):
"""Return a new Color object from red/green/blue values from 0.0 to 1.0."""
return Color(int(red * Color.MAX_VALUE),
int(green * Color.MAX_VALUE),
int(blue * Color.MAX_VALUE)) | 0.011152 |
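Illustrative call, assuming Color.MAX_VALUE is 255 and from_floats is exposed as a staticmethod on Color:

orange = Color.from_floats(1.0, 0.65, 0.0)   # roughly Color(255, 165, 0)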
async def dump_devinfo(dev: Device, file):
"""Dump developer information.
Pass `file` to write the results directly into a file.
"""
import attr
methods = await dev.get_supported_methods()
res = {
"supported_methods": {k: v.asdict() for k, v in methods.items()},
"settings": [attr.asdict(x) for x in await dev.get_settings()],
"sysinfo": attr.asdict(await dev.get_system_info()),
"interface_info": attr.asdict(await dev.get_interface_information()),
}
if file:
click.echo("Saving to file: %s" % file.name)
json.dump(res, file, sort_keys=True, indent=4)
else:
click.echo(json.dumps(res, sort_keys=True, indent=4)) | 0.001416 |
def _maxlength(X):
""" Returns the maximum length of signal trajectories X """
return np.fromiter((map(lambda x: len(x), X)), dtype=int).max() | 0.006667 |
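A quick illustration with trajectories of different lengths (assumes numpy is imported as np, as in the snippet):

trajs = [np.zeros(10), np.zeros(250), np.zeros(42)]
assert _maxlength(trajs) == 250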
def outside_root_to_404(fn):
"""
Decorator for converting PathOutsideRoot errors to 404s.
"""
@wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except PathOutsideRoot as e:
raise HTTPError(404, "Path outside root: [%s]" % e.args[0])
return wrapped | 0.002994 |
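A hedged usage sketch: any callable that may raise PathOutsideRoot can be wrapped so callers see an HTTP 404 instead. The function body below is illustrative only.

@outside_root_to_404
def get_model(root_dir, relative_path):
    # resolve_under_root is a stand-in for whatever may raise PathOutsideRoot
    return resolve_under_root(root_dir, relative_path)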
def from_url(cls, url, db=None, skip_full_coverage_check=False, **kwargs):
"""
Return a Redis client object configured from the given URL, which must
use either `the ``redis://`` scheme
<http://www.iana.org/assignments/uri-schemes/prov/redis>`_ for RESP
connections or the ``unix://`` scheme for Unix domain sockets.
For example::
redis://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
connection_pool = ClusterConnectionPool.from_url(url, db=db, **kwargs)
return cls(connection_pool=connection_pool, skip_full_coverage_check=skip_full_coverage_check) | 0.002308 |
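Usage sketch; the RedisCluster class name, password, and URL are illustrative:

client = RedisCluster.from_url('redis://:secret@localhost:6379/0',
                               skip_full_coverage_check=True)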
def is_homogeneous(self):
"""True if all the elements of the array are the same."""
hom_base = isinstance(self.base_value, (int, long, numpy.integer, float, bool)) \
or type(self.base_value) == self.dtype \
or (isinstance(self.dtype, type) and isinstance(self.base_value, self.dtype))
hom_ops = all(obj.is_homogeneous for f, obj in self.operations if isinstance(obj, larray))
return hom_base and hom_ops | 0.014799 |
def generate_megaman_manifold(sampling=2, nfolds=2,
rotate=True, random_state=None):
"""Generate a manifold of the megaman data"""
X, c = generate_megaman_data(sampling)
for i in range(nfolds):
X = np.hstack([_make_S_curve(x) for x in X.T])
if rotate:
rand = check_random_state(random_state)
R = rand.randn(X.shape[1], X.shape[1])
U, s, VT = np.linalg.svd(R)
X = np.dot(X, U)
return X, c | 0.002088 |
def segmentlistdict_fromsearchsummary_in(xmldoc, program = None):
"""
Convenience wrapper for a common case usage of the segmentlistdict
class: searches the process table in xmldoc for occurrences of a
program named program, then scans the search summary table for
matching process IDs and constructs a segmentlistdict object from
the in segments in those rows.
Note: the segmentlists in the segmentlistdict are not necessarily
coalesced, they contain the segments as they appear in the
search_summary table.
"""
stbl = lsctables.SearchSummaryTable.get_table(xmldoc)
ptbl = lsctables.ProcessTable.get_table(xmldoc)
return stbl.get_in_segmentlistdict(program and ptbl.get_ids_by_program(program)) | 0.023944 |
def configure_volume(before_change=lambda: None, after_change=lambda: None):
'''Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
'''
config = get_config()
if not config:
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
raise VolumeConfigurationError()
if config['ephemeral']:
if os.path.ismount(config['mountpoint']):
before_change()
unmount_volume(config)
after_change()
return 'ephemeral'
else:
# persistent storage
if os.path.ismount(config['mountpoint']):
mounts = dict(managed_mounts())
if mounts.get(config['mountpoint']) != config['device']:
before_change()
unmount_volume(config)
mount_volume(config)
after_change()
else:
before_change()
mount_volume(config)
after_change()
return config['mountpoint'] | 0.000861 |
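A hedged usage sketch from a hook, assuming charmhelpers-style service helpers (service_stop/service_start are illustrative names):

mountpoint = configure_volume(
    before_change=lambda: service_stop('postgresql'),
    after_change=lambda: service_start('postgresql'))
if mountpoint != 'ephemeral':
    hookenv.log('Persistent data lives under {}'.format(mountpoint))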
def parse_options(cls, line, ns={}):
"""
Similar to parse but returns a list of Options objects instead
of the dictionary format.
"""
parsed = cls.parse(line, ns=ns)
options_list = []
for spec in sorted(parsed.keys()):
options = parsed[spec]
merged = {}
for group in options.values():
merged = dict(group.kwargs, **merged)
options_list.append(Options(spec, **merged))
return options_list | 0.003883 |
def seek(self, offset, whence=0):
"""Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking."""
logger.debug('seeking to offset: %r whence: %r', offset, whence)
if whence not in s3.WHENCE_CHOICES:
raise ValueError('invalid whence, expected one of %r' % s3.WHENCE_CHOICES)
if not self.seekable():
raise OSError
if whence == s3.START:
new_pos = offset
elif whence == s3.CURRENT:
new_pos = self._current_pos + offset
elif whence == s3.END:
new_pos = self.content_length + offset
new_pos = s3.clamp(new_pos, 0, self.content_length)
if self._current_pos == new_pos:
return self._current_pos
logger.debug("http seeking from current_pos: %d to new_pos: %d", self._current_pos, new_pos)
self._current_pos = new_pos
if new_pos == self.content_length:
self.response = None
self._read_iter = None
self._read_buffer.empty()
else:
response = self._partial_request(new_pos)
if response.ok:
self.response = response
self._read_iter = self.response.iter_content(self.buffer_size)
self._read_buffer.empty()
else:
self.response = None
return self._current_pos | 0.002681 |
def _starts_with_drive_letter(self, file_path):
"""Return True if file_path starts with a drive letter.
Args:
file_path: the full path to be examined.
Returns:
`True` if drive letter support is enabled in the filesystem and
the path starts with a drive letter.
"""
colon = self._matching_string(file_path, ':')
        return (self.is_windows_fs and len(file_path) >= 2 and
                file_path[:1].isalpha() and file_path[1:2] == colon)
def set_permitted_ip(address=None, deploy=False):
'''
Add an IPv4 address or network to the permitted IP list.
CLI Example:
Args:
address (str): The IPv4 address or network to allow access to add to the Palo Alto device.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_permitted_ip 10.0.0.1
salt '*' panos.set_permitted_ip 10.0.0.0/24
salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True
'''
if not address:
raise CommandExecutionError("Address option must not be empty.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip',
'element': '<entry name=\'{0}\'></entry>'.format(address)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret | 0.003891 |
def generateCatalog(wcs, mode='automatic', catalog=None,
src_find_filters=None, **kwargs):
""" Function which determines what type of catalog object needs to be
instantiated based on what type of source selection algorithm the user
specified.
Parameters
----------
wcs : obj
WCS object generated by STWCS or PyWCS
catalog : str or ndarray
Filename of existing catalog or ndarray of image for generation of
source catalog.
kwargs : dict
        Parameters needed to interpret the source catalog from the input
        catalog; `findmode` is required.
Returns
-------
catalog : obj
A Catalog-based class instance for keeping track of WCS and
associated source catalog
"""
if not isinstance(catalog,Catalog):
if mode == 'automatic': # if an array is provided as the source
# Create a new catalog directly from the image
catalog = ImageCatalog(wcs,catalog,src_find_filters,**kwargs)
else: # a catalog file was provided as the catalog source
catalog = UserCatalog(wcs,catalog,**kwargs)
return catalog | 0.007705 |
def remove_go(self, target):
"""
FOR SAVING MEMORY
"""
with self.lock:
if not self._go:
try:
self.job_queue.remove(target)
except ValueError:
pass | 0.007634 |
def get_node_pos(self, key):
"""Given a string key a corresponding node in the hash ring is returned
        along with its position in the ring.
If the hash ring is empty, (`None`, `None`) is returned.
"""
if len(self.ring) == 0:
return [None, None]
crc = self.hash_method(b(key))
idx = bisect.bisect(self.sorted_keys, crc)
# prevents out of range index
idx = min(idx, (self.replicas * len(self.nodes)) - 1)
return [self.ring[self.sorted_keys[idx]], idx] | 0.003697 |
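Illustrative lookup, assuming the surrounding object is a consistent-hash ring with nodes already added; the key is arbitrary:

node, pos = ring.get_node_pos('user:1001')
if node is None:
    print('hash ring is empty')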
def trainRegressor(cls, data, categoricalFeaturesInfo,
impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0):
"""
Train a decision tree model for regression.
:param data:
Training data: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param impurity:
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
:param maxBins:
Number of bins used for finding splits at each node.
(default: 32)
:param minInstancesPerNode:
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
:param minInfoGain:
Minimum info gain required to create a split.
(default: 0.0)
:return:
DecisionTreeModel.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {1: 0.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "regression", 0, categoricalFeaturesInfo,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain) | 0.002965 |
def generate_docker_compose(self):
""" Generate a sample docker compose
"""
example = {}
example['app'] = {}
example['app']['environment'] = []
for key in sorted(list(self.spec.keys())):
if self.spec[key]['type'] in (dict, list):
value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'"
else:
value = f"{self.spec[key].get('example', '')}"
example['app']['environment'].append(f"{self.env_prefix}_{key.upper()}={value}")
print(yaml.dump(example, default_flow_style=False)) | 0.004983 |
def receive(self):
"""I receive data+hash, check for a match, confirm or not
confirm to the sender, and return the data payload.
"""
def _receive(input_message):
self.data = input_message[:-64]
_hash = input_message[-64:]
if h.sha256(self.data).hexdigest() == _hash:
self._w.send_message('Confirmed!')
else:
self._w.send_message('Not Confirmed!')
yield self.start_tor()
self._w = wormhole.create(u'axotor', RENDEZVOUS_RELAY, self._reactor,
tor=self._tor, timing=self._timing)
self._w.set_code(self._code)
yield self._w.get_message().addCallback(_receive)
yield self._w.close()
self._reactor.stop()
return | 0.002478 |
def copy(self: BaseBoardT) -> BaseBoardT:
"""Creates a copy of the board."""
board = type(self)(None)
board.pawns = self.pawns
board.knights = self.knights
board.bishops = self.bishops
board.rooks = self.rooks
board.queens = self.queens
board.kings = self.kings
board.occupied_co[WHITE] = self.occupied_co[WHITE]
board.occupied_co[BLACK] = self.occupied_co[BLACK]
board.occupied = self.occupied
board.promoted = self.promoted
return board | 0.00367 |
def find_content(self, text):
"""Find content."""
if self.trigraphs:
text = RE_TRIGRAPHS.sub(self.process_trigraphs, text)
for m in self.pattern.finditer(self.norm_nl(text)):
self.evaluate(m) | 0.008299 |
def type_id(self) -> UnitTypeId:
""" UnitTypeId found in sc2/ids/unit_typeid
Caches all type_ids of the same unit type"""
unit_type = self._proto.unit_type
if unit_type not in self._game_data.unit_types:
self._game_data.unit_types[unit_type] = UnitTypeId(unit_type)
return self._game_data.unit_types[unit_type] | 0.005525 |
def noise4d(self, x, y, z, w):
"""
Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.
"""
# Place input coordinates on simplectic honeycomb.
stretch_offset = (x + y + z + w) * STRETCH_CONSTANT_4D
xs = x + stretch_offset
ys = y + stretch_offset
zs = z + stretch_offset
ws = w + stretch_offset
# Floor to get simplectic honeycomb coordinates of rhombo-hypercube super-cell origin.
xsb = floor(xs)
ysb = floor(ys)
zsb = floor(zs)
wsb = floor(ws)
# Skew out to get actual coordinates of stretched rhombo-hypercube origin. We'll need these later.
squish_offset = (xsb + ysb + zsb + wsb) * SQUISH_CONSTANT_4D
xb = xsb + squish_offset
yb = ysb + squish_offset
zb = zsb + squish_offset
wb = wsb + squish_offset
# Compute simplectic honeycomb coordinates relative to rhombo-hypercube origin.
xins = xs - xsb
yins = ys - ysb
zins = zs - zsb
wins = ws - wsb
# Sum those together to get a value that determines which region we're in.
in_sum = xins + yins + zins + wins
        # Positions relative to the origin point.
dx0 = x - xb
dy0 = y - yb
dz0 = z - zb
dw0 = w - wb
value = 0
extrapolate = self._extrapolate4d
if in_sum <= 1: # We're inside the pentachoron (4-Simplex) at (0,0,0,0)
# Determine which two of (0,0,0,1), (0,0,1,0), (0,1,0,0), (1,0,0,0) are closest.
a_po = 0x01
a_score = xins
b_po = 0x02
b_score = yins
if a_score >= b_score and zins > b_score:
b_score = zins
b_po = 0x04
elif a_score < b_score and zins > a_score:
a_score = zins
a_po = 0x04
if a_score >= b_score and wins > b_score:
b_score = wins
b_po = 0x08
elif a_score < b_score and wins > a_score:
a_score = wins
a_po = 0x08
            # Now we determine the three lattice points not part of the pentachoron that may contribute.
# This depends on the closest two pentachoron vertices, including (0,0,0,0)
uins = 1 - in_sum
if uins > a_score or uins > b_score: # (0,0,0,0) is one of the closest two pentachoron vertices.
c = b_po if (b_score > a_score) else a_po # Our other closest vertex is the closest out of a and b.
if (c & 0x01) == 0:
xsv_ext0 = xsb - 1
xsv_ext1 = xsv_ext2 = xsb
dx_ext0 = dx0 + 1
dx_ext1 = dx_ext2 = dx0
else:
xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb + 1
dx_ext0 = dx_ext1 = dx_ext2 = dx0 - 1
if (c & 0x02) == 0:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
dy_ext0 = dy_ext1 = dy_ext2 = dy0
if (c & 0x01) == 0x01:
ysv_ext0 -= 1
dy_ext0 += 1
else:
ysv_ext1 -= 1
dy_ext1 += 1
else:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
dy_ext0 = dy_ext1 = dy_ext2 = dy0 - 1
if (c & 0x04) == 0:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
dz_ext0 = dz_ext1 = dz_ext2 = dz0
if (c & 0x03) != 0:
if (c & 0x03) == 0x03:
zsv_ext0 -= 1
dz_ext0 += 1
else:
zsv_ext1 -= 1
dz_ext1 += 1
else:
zsv_ext2 -= 1
dz_ext2 += 1
else:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
dz_ext0 = dz_ext1 = dz_ext2 = dz0 - 1
if (c & 0x08) == 0:
wsv_ext0 = wsv_ext1 = wsb
wsv_ext2 = wsb - 1
dw_ext0 = dw_ext1 = dw0
dw_ext2 = dw0 + 1
else:
wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb + 1
dw_ext0 = dw_ext1 = dw_ext2 = dw0 - 1
else: # (0,0,0,0) is not one of the closest two pentachoron vertices.
c = (a_po | b_po) # Our three extra vertices are determined by the closest two.
if (c & 0x01) == 0:
xsv_ext0 = xsv_ext2 = xsb
xsv_ext1 = xsb - 1
dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_4D
dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_4D
dx_ext2 = dx0 - SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb + 1
dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dx_ext1 = dx_ext2 = dx0 - 1 - SQUISH_CONSTANT_4D
if (c & 0x02) == 0:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_4D
dy_ext1 = dy_ext2 = dy0 - SQUISH_CONSTANT_4D
if (c & 0x01) == 0x01:
ysv_ext1 -= 1
dy_ext1 += 1
else:
ysv_ext2 -= 1
dy_ext2 += 1
else:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dy_ext1 = dy_ext2 = dy0 - 1 - SQUISH_CONSTANT_4D
if (c & 0x04) == 0:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_4D
dz_ext1 = dz_ext2 = dz0 - SQUISH_CONSTANT_4D
if (c & 0x03) == 0x03:
zsv_ext1 -= 1
dz_ext1 += 1
else:
zsv_ext2 -= 1
dz_ext2 += 1
else:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dz_ext1 = dz_ext2 = dz0 - 1 - SQUISH_CONSTANT_4D
if (c & 0x08) == 0:
wsv_ext0 = wsv_ext1 = wsb
wsv_ext2 = wsb - 1
dw_ext0 = dw0 - 2 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 - SQUISH_CONSTANT_4D
dw_ext2 = dw0 + 1 - SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb + 1
dw_ext0 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
dw_ext1 = dw_ext2 = dw0 - 1 - SQUISH_CONSTANT_4D
# Contribution (0,0,0,0)
attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 - dw0 * dw0
if attn0 > 0:
attn0 *= attn0
value += attn0 * attn0 * extrapolate(xsb + 0, ysb + 0, zsb + 0, wsb + 0, dx0, dy0, dz0, dw0)
# Contribution (1,0,0,0)
dx1 = dx0 - 1 - SQUISH_CONSTANT_4D
dy1 = dy0 - 0 - SQUISH_CONSTANT_4D
dz1 = dz0 - 0 - SQUISH_CONSTANT_4D
dw1 = dw0 - 0 - SQUISH_CONSTANT_4D
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 0, dx1, dy1, dz1, dw1)
# Contribution (0,1,0,0)
dx2 = dx0 - 0 - SQUISH_CONSTANT_4D
dy2 = dy0 - 1 - SQUISH_CONSTANT_4D
dz2 = dz1
dw2 = dw1
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 0, dx2, dy2, dz2, dw2)
# Contribution (0,0,1,0)
dx3 = dx2
dy3 = dy1
dz3 = dz0 - 1 - SQUISH_CONSTANT_4D
dw3 = dw1
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 0, dx3, dy3, dz3, dw3)
# Contribution (0,0,0,1)
dx4 = dx2
dy4 = dy1
dz4 = dz1
dw4 = dw0 - 1 - SQUISH_CONSTANT_4D
attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
if attn4 > 0:
attn4 *= attn4
value += attn4 * attn4 * extrapolate(xsb + 0, ysb + 0, zsb + 0, wsb + 1, dx4, dy4, dz4, dw4)
elif in_sum >= 3: # We're inside the pentachoron (4-Simplex) at (1,1,1,1)
# Determine which two of (1,1,1,0), (1,1,0,1), (1,0,1,1), (0,1,1,1) are closest.
a_po = 0x0E
a_score = xins
b_po = 0x0D
b_score = yins
if a_score <= b_score and zins < b_score:
b_score = zins
b_po = 0x0B
elif a_score > b_score and zins < a_score:
a_score = zins
a_po = 0x0B
if a_score <= b_score and wins < b_score:
b_score = wins
b_po = 0x07
elif a_score > b_score and wins < a_score:
a_score = wins
a_po = 0x07
            # Now we determine the three lattice points not part of the pentachoron that may contribute.
# This depends on the closest two pentachoron vertices, including (0,0,0,0)
uins = 4 - in_sum
if uins < a_score or uins < b_score: # (1,1,1,1) is one of the closest two pentachoron vertices.
c = b_po if (b_score < a_score) else a_po # Our other closest vertex is the closest out of a and b.
if (c & 0x01) != 0:
xsv_ext0 = xsb + 2
xsv_ext1 = xsv_ext2 = xsb + 1
dx_ext0 = dx0 - 2 - 4 * SQUISH_CONSTANT_4D
dx_ext1 = dx_ext2 = dx0 - 1 - 4 * SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb
dx_ext0 = dx_ext1 = dx_ext2 = dx0 - 4 * SQUISH_CONSTANT_4D
if (c & 0x02) != 0:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
dy_ext0 = dy_ext1 = dy_ext2 = dy0 - 1 - 4 * SQUISH_CONSTANT_4D
if (c & 0x01) != 0:
ysv_ext1 += 1
dy_ext1 -= 1
else:
ysv_ext0 += 1
dy_ext0 -= 1
else:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
dy_ext0 = dy_ext1 = dy_ext2 = dy0 - 4 * SQUISH_CONSTANT_4D
if (c & 0x04) != 0:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
dz_ext0 = dz_ext1 = dz_ext2 = dz0 - 1 - 4 * SQUISH_CONSTANT_4D
if (c & 0x03) != 0x03:
if (c & 0x03) == 0:
zsv_ext0 += 1
dz_ext0 -= 1
else:
zsv_ext1 += 1
dz_ext1 -= 1
else:
zsv_ext2 += 1
dz_ext2 -= 1
else:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
dz_ext0 = dz_ext1 = dz_ext2 = dz0 - 4 * SQUISH_CONSTANT_4D
if (c & 0x08) != 0:
wsv_ext0 = wsv_ext1 = wsb + 1
wsv_ext2 = wsb + 2
dw_ext0 = dw_ext1 = dw0 - 1 - 4 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 2 - 4 * SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb
dw_ext0 = dw_ext1 = dw_ext2 = dw0 - 4 * SQUISH_CONSTANT_4D
else: # (1,1,1,1) is not one of the closest two pentachoron vertices.
c = (a_po & b_po) # Our three extra vertices are determined by the closest two.
if (c & 0x01) != 0:
xsv_ext0 = xsv_ext2 = xsb + 1
xsv_ext1 = xsb + 2
dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dx_ext1 = dx0 - 2 - 3 * SQUISH_CONSTANT_4D
dx_ext2 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb
dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_4D
dx_ext1 = dx_ext2 = dx0 - 3 * SQUISH_CONSTANT_4D
if (c & 0x02) != 0:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dy_ext1 = dy_ext2 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
if (c & 0x01) != 0:
ysv_ext2 += 1
dy_ext2 -= 1
else:
ysv_ext1 += 1
dy_ext1 -= 1
else:
ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_4D
dy_ext1 = dy_ext2 = dy0 - 3 * SQUISH_CONSTANT_4D
if (c & 0x04) != 0:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dz_ext1 = dz_ext2 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
if (c & 0x03) != 0:
zsv_ext2 += 1
dz_ext2 -= 1
else:
zsv_ext1 += 1
dz_ext1 -= 1
else:
zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_4D
dz_ext1 = dz_ext2 = dz0 - 3 * SQUISH_CONSTANT_4D
if (c & 0x08) != 0:
wsv_ext0 = wsv_ext1 = wsb + 1
wsv_ext2 = wsb + 2
dw_ext0 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 2 - 3 * SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb
dw_ext0 = dw0 - 2 * SQUISH_CONSTANT_4D
dw_ext1 = dw_ext2 = dw0 - 3 * SQUISH_CONSTANT_4D
# Contribution (1,1,1,0)
dx4 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
dy4 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
dz4 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
dw4 = dw0 - 3 * SQUISH_CONSTANT_4D
attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
if attn4 > 0:
attn4 *= attn4
value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 1, wsb + 0, dx4, dy4, dz4, dw4)
# Contribution (1,1,0,1)
dx3 = dx4
dy3 = dy4
dz3 = dz0 - 3 * SQUISH_CONSTANT_4D
dw3 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 1, dx3, dy3, dz3, dw3)
# Contribution (1,0,1,1)
dx2 = dx4
dy2 = dy0 - 3 * SQUISH_CONSTANT_4D
dz2 = dz4
dw2 = dw3
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 1, dx2, dy2, dz2, dw2)
# Contribution (0,1,1,1)
dx1 = dx0 - 3 * SQUISH_CONSTANT_4D
dz1 = dz4
dy1 = dy4
dw1 = dw3
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 1, dx1, dy1, dz1, dw1)
# Contribution (1,1,1,1)
dx0 = dx0 - 1 - 4 * SQUISH_CONSTANT_4D
dy0 = dy0 - 1 - 4 * SQUISH_CONSTANT_4D
dz0 = dz0 - 1 - 4 * SQUISH_CONSTANT_4D
dw0 = dw0 - 1 - 4 * SQUISH_CONSTANT_4D
attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 - dw0 * dw0
if attn0 > 0:
attn0 *= attn0
value += attn0 * attn0 * extrapolate(xsb + 1, ysb + 1, zsb + 1, wsb + 1, dx0, dy0, dz0, dw0)
elif in_sum <= 2: # We're inside the first dispentachoron (Rectified 4-Simplex)
a_is_bigger_side = True
b_is_bigger_side = True
# Decide between (1,1,0,0) and (0,0,1,1)
if xins + yins > zins + wins:
a_score = xins + yins
a_po = 0x03
else:
a_score = zins + wins
a_po = 0x0C
# Decide between (1,0,1,0) and (0,1,0,1)
if xins + zins > yins + wins:
b_score = xins + zins
b_po = 0x05
else:
b_score = yins + wins
b_po = 0x0A
# Closer between (1,0,0,1) and (0,1,1,0) will replace the further of a and b, if closer.
if xins + wins > yins + zins:
score = xins + wins
if a_score >= b_score and score > b_score:
b_score = score
b_po = 0x09
elif a_score < b_score and score > a_score:
a_score = score
a_po = 0x09
else:
score = yins + zins
if a_score >= b_score and score > b_score:
b_score = score
b_po = 0x06
elif a_score < b_score and score > a_score:
a_score = score
a_po = 0x06
# Decide if (1,0,0,0) is closer.
p1 = 2 - in_sum + xins
if a_score >= b_score and p1 > b_score:
b_score = p1
b_po = 0x01
b_is_bigger_side = False
elif a_score < b_score and p1 > a_score:
a_score = p1
a_po = 0x01
a_is_bigger_side = False
# Decide if (0,1,0,0) is closer.
p2 = 2 - in_sum + yins
if a_score >= b_score and p2 > b_score:
b_score = p2
b_po = 0x02
b_is_bigger_side = False
elif a_score < b_score and p2 > a_score:
a_score = p2
a_po = 0x02
a_is_bigger_side = False
# Decide if (0,0,1,0) is closer.
p3 = 2 - in_sum + zins
if a_score >= b_score and p3 > b_score:
b_score = p3
b_po = 0x04
b_is_bigger_side = False
elif a_score < b_score and p3 > a_score:
a_score = p3
a_po = 0x04
a_is_bigger_side = False
# Decide if (0,0,0,1) is closer.
p4 = 2 - in_sum + wins
if a_score >= b_score and p4 > b_score:
b_po = 0x08
b_is_bigger_side = False
elif a_score < b_score and p4 > a_score:
a_po = 0x08
a_is_bigger_side = False
            # Where each of the two closest points lies determines how the extra three vertices are calculated.
if a_is_bigger_side == b_is_bigger_side:
                if a_is_bigger_side: # Both closest points on the bigger side
c1 = (a_po | b_po)
c2 = (a_po & b_po)
if (c1 & 0x01) == 0:
xsv_ext0 = xsb
xsv_ext1 = xsb - 1
dx_ext0 = dx0 - 3 * SQUISH_CONSTANT_4D
dx_ext1 = dx0 + 1 - 2 * SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsb + 1
dx_ext0 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
dx_ext1 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
if (c1 & 0x02) == 0:
ysv_ext0 = ysb
ysv_ext1 = ysb - 1
dy_ext0 = dy0 - 3 * SQUISH_CONSTANT_4D
dy_ext1 = dy0 + 1 - 2 * SQUISH_CONSTANT_4D
else:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
dy_ext1 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
if (c1 & 0x04) == 0:
zsv_ext0 = zsb
zsv_ext1 = zsb - 1
dz_ext0 = dz0 - 3 * SQUISH_CONSTANT_4D
dz_ext1 = dz0 + 1 - 2 * SQUISH_CONSTANT_4D
else:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
dz_ext1 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
if (c1 & 0x08) == 0:
wsv_ext0 = wsb
wsv_ext1 = wsb - 1
dw_ext0 = dw0 - 3 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 + 1 - 2 * SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsb + 1
dw_ext0 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
                    # One combination is a permutation of (0,0,0,2) based on c2
xsv_ext2 = xsb
ysv_ext2 = ysb
zsv_ext2 = zsb
wsv_ext2 = wsb
dx_ext2 = dx0 - 2 * SQUISH_CONSTANT_4D
dy_ext2 = dy0 - 2 * SQUISH_CONSTANT_4D
dz_ext2 = dz0 - 2 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 2 * SQUISH_CONSTANT_4D
if (c2 & 0x01) != 0:
xsv_ext2 += 2
dx_ext2 -= 2
elif (c2 & 0x02) != 0:
ysv_ext2 += 2
dy_ext2 -= 2
elif (c2 & 0x04) != 0:
zsv_ext2 += 2
dz_ext2 -= 2
else:
wsv_ext2 += 2
dw_ext2 -= 2
                else: # Both closest points on the smaller side
                    # One of the two extra points is (0,0,0,0)
xsv_ext2 = xsb
ysv_ext2 = ysb
zsv_ext2 = zsb
wsv_ext2 = wsb
dx_ext2 = dx0
dy_ext2 = dy0
dz_ext2 = dz0
dw_ext2 = dw0
                    # The other two points are based on the omitted axes.
c = (a_po | b_po)
if (c & 0x01) == 0:
xsv_ext0 = xsb - 1
xsv_ext1 = xsb
dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_4D
dx_ext1 = dx0 - SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsb + 1
dx_ext0 = dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_4D
if (c & 0x02) == 0:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy_ext1 = dy0 - SQUISH_CONSTANT_4D
if (c & 0x01) == 0x01:
ysv_ext0 -= 1
dy_ext0 += 1
else:
ysv_ext1 -= 1
dy_ext1 += 1
else:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_4D
if (c & 0x04) == 0:
zsv_ext0 = zsv_ext1 = zsb
dz_ext0 = dz_ext1 = dz0 - SQUISH_CONSTANT_4D
if (c & 0x03) == 0x03:
zsv_ext0 -= 1
dz_ext0 += 1
else:
zsv_ext1 -= 1
dz_ext1 += 1
else:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_4D
if (c & 0x08) == 0:
wsv_ext0 = wsb
wsv_ext1 = wsb - 1
dw_ext0 = dw0 - SQUISH_CONSTANT_4D
dw_ext1 = dw0 + 1 - SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsb + 1
dw_ext0 = dw_ext1 = dw0 - 1 - SQUISH_CONSTANT_4D
else: # One po on each "side"
if a_is_bigger_side:
c1 = a_po
c2 = b_po
else:
c1 = b_po
c2 = a_po
            # Two contributions are the bigger-sided point with each 0 replaced with -1.
if (c1 & 0x01) == 0:
xsv_ext0 = xsb - 1
xsv_ext1 = xsb
dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_4D
dx_ext1 = dx0 - SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsb + 1
dx_ext0 = dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_4D
if (c1 & 0x02) == 0:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy_ext1 = dy0 - SQUISH_CONSTANT_4D
if (c1 & 0x01) == 0x01:
ysv_ext0 -= 1
dy_ext0 += 1
else:
ysv_ext1 -= 1
dy_ext1 += 1
else:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_4D
if (c1 & 0x04) == 0:
zsv_ext0 = zsv_ext1 = zsb
dz_ext0 = dz_ext1 = dz0 - SQUISH_CONSTANT_4D
if (c1 & 0x03) == 0x03:
zsv_ext0 -= 1
dz_ext0 += 1
else:
zsv_ext1 -= 1
dz_ext1 += 1
else:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_4D
if (c1 & 0x08) == 0:
wsv_ext0 = wsb
wsv_ext1 = wsb - 1
dw_ext0 = dw0 - SQUISH_CONSTANT_4D
dw_ext1 = dw0 + 1 - SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsb + 1
dw_ext0 = dw_ext1 = dw0 - 1 - SQUISH_CONSTANT_4D
            # One contribution is a permutation of (0,0,0,2) based on the smaller-sided point
xsv_ext2 = xsb
ysv_ext2 = ysb
zsv_ext2 = zsb
wsv_ext2 = wsb
dx_ext2 = dx0 - 2 * SQUISH_CONSTANT_4D
dy_ext2 = dy0 - 2 * SQUISH_CONSTANT_4D
dz_ext2 = dz0 - 2 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 2 * SQUISH_CONSTANT_4D
if (c2 & 0x01) != 0:
xsv_ext2 += 2
dx_ext2 -= 2
elif (c2 & 0x02) != 0:
ysv_ext2 += 2
dy_ext2 -= 2
elif (c2 & 0x04) != 0:
zsv_ext2 += 2
dz_ext2 -= 2
else:
wsv_ext2 += 2
dw_ext2 -= 2
# Contribution (1,0,0,0)
dx1 = dx0 - 1 - SQUISH_CONSTANT_4D
dy1 = dy0 - 0 - SQUISH_CONSTANT_4D
dz1 = dz0 - 0 - SQUISH_CONSTANT_4D
dw1 = dw0 - 0 - SQUISH_CONSTANT_4D
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 0, dx1, dy1, dz1, dw1)
# Contribution (0,1,0,0)
dx2 = dx0 - 0 - SQUISH_CONSTANT_4D
dy2 = dy0 - 1 - SQUISH_CONSTANT_4D
dz2 = dz1
dw2 = dw1
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 0, dx2, dy2, dz2, dw2)
# Contribution (0,0,1,0)
dx3 = dx2
dy3 = dy1
dz3 = dz0 - 1 - SQUISH_CONSTANT_4D
dw3 = dw1
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 0, dx3, dy3, dz3, dw3)
# Contribution (0,0,0,1)
dx4 = dx2
dy4 = dy1
dz4 = dz1
dw4 = dw0 - 1 - SQUISH_CONSTANT_4D
attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
if attn4 > 0:
attn4 *= attn4
value += attn4 * attn4 * extrapolate(xsb + 0, ysb + 0, zsb + 0, wsb + 1, dx4, dy4, dz4, dw4)
# Contribution (1,1,0,0)
dx5 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy5 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz5 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
dw5 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 - dw5 * dw5
if attn5 > 0:
attn5 *= attn5
value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 0, dx5, dy5, dz5, dw5)
# Contribution (1,0,1,0)
dx6 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy6 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
dz6 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw6 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 - dw6 * dw6
if attn6 > 0:
attn6 *= attn6
value += attn6 * attn6 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 0, dx6, dy6, dz6, dw6)
# Contribution (1,0,0,1)
dx7 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy7 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
dz7 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
dw7 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
attn7 = 2 - dx7 * dx7 - dy7 * dy7 - dz7 * dz7 - dw7 * dw7
if attn7 > 0:
attn7 *= attn7
value += attn7 * attn7 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 1, dx7, dy7, dz7, dw7)
# Contribution (0,1,1,0)
dx8 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
dy8 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz8 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw8 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
attn8 = 2 - dx8 * dx8 - dy8 * dy8 - dz8 * dz8 - dw8 * dw8
if attn8 > 0:
attn8 *= attn8
value += attn8 * attn8 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 0, dx8, dy8, dz8, dw8)
# Contribution (0,1,0,1)
dx9 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
dy9 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz9 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
dw9 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
attn9 = 2 - dx9 * dx9 - dy9 * dy9 - dz9 * dz9 - dw9 * dw9
if attn9 > 0:
attn9 *= attn9
value += attn9 * attn9 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 1, dx9, dy9, dz9, dw9)
# Contribution (0,0,1,1)
dx10 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
dy10 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
dz10 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw10 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
attn10 = 2 - dx10 * dx10 - dy10 * dy10 - dz10 * dz10 - dw10 * dw10
if attn10 > 0:
attn10 *= attn10
value += attn10 * attn10 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 1, dx10, dy10, dz10, dw10)
else: # We're inside the second dispentachoron (Rectified 4-Simplex)
a_is_bigger_side = True
b_is_bigger_side = True
# Decide between (0,0,1,1) and (1,1,0,0)
if xins + yins < zins + wins:
a_score = xins + yins
a_po = 0x0C
else:
a_score = zins + wins
a_po = 0x03
# Decide between (0,1,0,1) and (1,0,1,0)
if xins + zins < yins + wins:
b_score = xins + zins
b_po = 0x0A
else:
b_score = yins + wins
b_po = 0x05
# Closer between (0,1,1,0) and (1,0,0,1) will replace the further of a and b, if closer.
if xins + wins < yins + zins:
score = xins + wins
if a_score <= b_score and score < b_score:
b_score = score
b_po = 0x06
elif a_score > b_score and score < a_score:
a_score = score
a_po = 0x06
else:
score = yins + zins
if a_score <= b_score and score < b_score:
b_score = score
b_po = 0x09
elif a_score > b_score and score < a_score:
a_score = score
a_po = 0x09
# Decide if (0,1,1,1) is closer.
p1 = 3 - in_sum + xins
if a_score <= b_score and p1 < b_score:
b_score = p1
b_po = 0x0E
b_is_bigger_side = False
elif a_score > b_score and p1 < a_score:
a_score = p1
a_po = 0x0E
a_is_bigger_side = False
# Decide if (1,0,1,1) is closer.
p2 = 3 - in_sum + yins
if a_score <= b_score and p2 < b_score:
b_score = p2
b_po = 0x0D
b_is_bigger_side = False
elif a_score > b_score and p2 < a_score:
a_score = p2
a_po = 0x0D
a_is_bigger_side = False
# Decide if (1,1,0,1) is closer.
p3 = 3 - in_sum + zins
if a_score <= b_score and p3 < b_score:
b_score = p3
b_po = 0x0B
b_is_bigger_side = False
elif a_score > b_score and p3 < a_score:
a_score = p3
a_po = 0x0B
a_is_bigger_side = False
# Decide if (1,1,1,0) is closer.
p4 = 3 - in_sum + wins
if a_score <= b_score and p4 < b_score:
b_po = 0x07
b_is_bigger_side = False
elif a_score > b_score and p4 < a_score:
a_po = 0x07
a_is_bigger_side = False
            # Where each of the two closest points lies determines how the extra three vertices are calculated.
if a_is_bigger_side == b_is_bigger_side:
                if a_is_bigger_side: # Both closest points on the bigger side
c1 = (a_po & b_po)
c2 = (a_po | b_po)
                    # Two contributions are permutations of (0,0,0,1) and (0,0,0,2) based on c1
xsv_ext0 = xsv_ext1 = xsb
ysv_ext0 = ysv_ext1 = ysb
zsv_ext0 = zsv_ext1 = zsb
wsv_ext0 = wsv_ext1 = wsb
dx_ext0 = dx0 - SQUISH_CONSTANT_4D
dy_ext0 = dy0 - SQUISH_CONSTANT_4D
dz_ext0 = dz0 - SQUISH_CONSTANT_4D
dw_ext0 = dw0 - SQUISH_CONSTANT_4D
dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_4D
dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_4D
dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 - 2 * SQUISH_CONSTANT_4D
if (c1 & 0x01) != 0:
xsv_ext0 += 1
dx_ext0 -= 1
xsv_ext1 += 2
dx_ext1 -= 2
elif (c1 & 0x02) != 0:
ysv_ext0 += 1
dy_ext0 -= 1
ysv_ext1 += 2
dy_ext1 -= 2
elif (c1 & 0x04) != 0:
zsv_ext0 += 1
dz_ext0 -= 1
zsv_ext1 += 2
dz_ext1 -= 2
else:
wsv_ext0 += 1
dw_ext0 -= 1
wsv_ext1 += 2
dw_ext1 -= 2
                    # One contribution is a permutation of (1,1,1,-1) based on c2
xsv_ext2 = xsb + 1
ysv_ext2 = ysb + 1
zsv_ext2 = zsb + 1
wsv_ext2 = wsb + 1
dx_ext2 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy_ext2 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz_ext2 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
if (c2 & 0x01) == 0:
xsv_ext2 -= 2
dx_ext2 += 2
elif (c2 & 0x02) == 0:
ysv_ext2 -= 2
dy_ext2 += 2
elif (c2 & 0x04) == 0:
zsv_ext2 -= 2
dz_ext2 += 2
else:
wsv_ext2 -= 2
dw_ext2 += 2
                else: # Both closest points on the smaller side
                    # One of the two extra points is (1,1,1,1)
xsv_ext2 = xsb + 1
ysv_ext2 = ysb + 1
zsv_ext2 = zsb + 1
wsv_ext2 = wsb + 1
dx_ext2 = dx0 - 1 - 4 * SQUISH_CONSTANT_4D
dy_ext2 = dy0 - 1 - 4 * SQUISH_CONSTANT_4D
dz_ext2 = dz0 - 1 - 4 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 1 - 4 * SQUISH_CONSTANT_4D
                    # The other two points are based on the shared axes.
c = (a_po & b_po)
if (c & 0x01) != 0:
xsv_ext0 = xsb + 2
xsv_ext1 = xsb + 1
dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_4D
dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsb
dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_4D
if (c & 0x02) != 0:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
if (c & 0x01) == 0:
ysv_ext0 += 1
dy_ext0 -= 1
else:
ysv_ext1 += 1
dy_ext1 -= 1
else:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_4D
if (c & 0x04) != 0:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz_ext1 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
if (c & 0x03) == 0:
zsv_ext0 += 1
dz_ext0 -= 1
else:
zsv_ext1 += 1
dz_ext1 -= 1
else:
zsv_ext0 = zsv_ext1 = zsb
dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_4D
if (c & 0x08) != 0:
wsv_ext0 = wsb + 1
wsv_ext1 = wsb + 2
dw_ext0 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 - 2 - 3 * SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsb
dw_ext0 = dw_ext1 = dw0 - 3 * SQUISH_CONSTANT_4D
else: # One po on each "side"
if a_is_bigger_side:
c1 = a_po
c2 = b_po
else:
c1 = b_po
c2 = a_po
            # Two contributions are the bigger-sided point with each 1 replaced with 2.
if (c1 & 0x01) != 0:
xsv_ext0 = xsb + 2
xsv_ext1 = xsb + 1
dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_4D
dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
else:
xsv_ext0 = xsv_ext1 = xsb
dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_4D
if (c1 & 0x02) != 0:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
if (c1 & 0x01) == 0:
ysv_ext0 += 1
dy_ext0 -= 1
else:
ysv_ext1 += 1
dy_ext1 -= 1
else:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_4D
if (c1 & 0x04) != 0:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz_ext1 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
if (c1 & 0x03) == 0:
zsv_ext0 += 1
dz_ext0 -= 1
else:
zsv_ext1 += 1
dz_ext1 -= 1
else:
zsv_ext0 = zsv_ext1 = zsb
dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_4D
if (c1 & 0x08) != 0:
wsv_ext0 = wsb + 1
wsv_ext1 = wsb + 2
dw_ext0 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
dw_ext1 = dw0 - 2 - 3 * SQUISH_CONSTANT_4D
else:
wsv_ext0 = wsv_ext1 = wsb
dw_ext0 = dw_ext1 = dw0 - 3 * SQUISH_CONSTANT_4D
            # One contribution is a permutation of (1,1,1,-1) based on the smaller-sided point
xsv_ext2 = xsb + 1
ysv_ext2 = ysb + 1
zsv_ext2 = zsb + 1
wsv_ext2 = wsb + 1
dx_ext2 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy_ext2 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz_ext2 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw_ext2 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
if (c2 & 0x01) == 0:
xsv_ext2 -= 2
dx_ext2 += 2
elif (c2 & 0x02) == 0:
ysv_ext2 -= 2
dy_ext2 += 2
elif (c2 & 0x04) == 0:
zsv_ext2 -= 2
dz_ext2 += 2
else:
wsv_ext2 -= 2
dw_ext2 += 2
# Contribution (1,1,1,0)
dx4 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
dy4 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
dz4 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
dw4 = dw0 - 3 * SQUISH_CONSTANT_4D
attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
if attn4 > 0:
attn4 *= attn4
value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 1, wsb + 0, dx4, dy4, dz4, dw4)
# Contribution (1,1,0,1)
dx3 = dx4
dy3 = dy4
dz3 = dz0 - 3 * SQUISH_CONSTANT_4D
dw3 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 1, dx3, dy3, dz3, dw3)
# Contribution (1,0,1,1)
dx2 = dx4
dy2 = dy0 - 3 * SQUISH_CONSTANT_4D
dz2 = dz4
dw2 = dw3
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 1, dx2, dy2, dz2, dw2)
# Contribution (0,1,1,1)
dx1 = dx0 - 3 * SQUISH_CONSTANT_4D
dz1 = dz4
dy1 = dy4
dw1 = dw3
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 1, dx1, dy1, dz1, dw1)
# Contribution (1,1,0,0)
dx5 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy5 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz5 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
dw5 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 - dw5 * dw5
if attn5 > 0:
attn5 *= attn5
value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 0, dx5, dy5, dz5, dw5)
# Contribution (1,0,1,0)
dx6 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy6 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
dz6 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw6 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 - dw6 * dw6
if attn6 > 0:
attn6 *= attn6
value += attn6 * attn6 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 0, dx6, dy6, dz6, dw6)
# Contribution (1,0,0,1)
dx7 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
dy7 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
dz7 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
dw7 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
attn7 = 2 - dx7 * dx7 - dy7 * dy7 - dz7 * dz7 - dw7 * dw7
if attn7 > 0:
attn7 *= attn7
value += attn7 * attn7 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 1, dx7, dy7, dz7, dw7)
# Contribution (0,1,1,0)
dx8 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
dy8 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz8 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw8 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
attn8 = 2 - dx8 * dx8 - dy8 * dy8 - dz8 * dz8 - dw8 * dw8
if attn8 > 0:
attn8 *= attn8
value += attn8 * attn8 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 0, dx8, dy8, dz8, dw8)
# Contribution (0,1,0,1)
dx9 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
dy9 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
dz9 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
dw9 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
attn9 = 2 - dx9 * dx9 - dy9 * dy9 - dz9 * dz9 - dw9 * dw9
if attn9 > 0:
attn9 *= attn9
value += attn9 * attn9 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 1, dx9, dy9, dz9, dw9)
# Contribution (0,0,1,1)
dx10 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
dy10 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
dz10 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
dw10 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
attn10 = 2 - dx10 * dx10 - dy10 * dy10 - dz10 * dz10 - dw10 * dw10
if attn10 > 0:
attn10 *= attn10
value += attn10 * attn10 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 1, dx10, dy10, dz10, dw10)
# First extra vertex
attn_ext0 = 2 - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0 - dw_ext0 * dw_ext0
if attn_ext0 > 0:
attn_ext0 *= attn_ext0
value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, wsv_ext0, dx_ext0, dy_ext0, dz_ext0, dw_ext0)
# Second extra vertex
attn_ext1 = 2 - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1 - dw_ext1 * dw_ext1
if attn_ext1 > 0:
attn_ext1 *= attn_ext1
value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, wsv_ext1, dx_ext1, dy_ext1, dz_ext1, dw_ext1)
# Third extra vertex
attn_ext2 = 2 - dx_ext2 * dx_ext2 - dy_ext2 * dy_ext2 - dz_ext2 * dz_ext2 - dw_ext2 * dw_ext2
if attn_ext2 > 0:
attn_ext2 *= attn_ext2
value += attn_ext2 * attn_ext2 * extrapolate(xsv_ext2, ysv_ext2, zsv_ext2, wsv_ext2, dx_ext2, dy_ext2, dz_ext2, dw_ext2)
return value / NORM_CONSTANT_4D | 0.001882 |
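A hedged usage sketch, assuming this method lives on opensimplex.OpenSimplex as in older releases of that package; the seed and coordinates are arbitrary:

from opensimplex import OpenSimplex

gen = OpenSimplex(seed=3)
value = gen.noise4d(0.5, 0.25, -1.0, 2.0)   # a float roughly in [-1, 1]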
def plot_feature_histograms(xyzall,
feature_labels=None,
ax=None,
ylog=False,
outfile=None,
n_bins=50,
ignore_dim_warning=False,
**kwargs):
r"""Feature histogram plot
Parameters
----------
xyzall : np.ndarray(T, d)
(Concatenated list of) input features; containing time series data to be plotted.
Array of T data points in d dimensions (features).
feature_labels : iterable of str or pyemma.Featurizer, optional, default=None
        Labels of histogrammed features; defaults to the feature index.
ax : matplotlib.Axes object, optional, default=None.
The ax to plot to; if ax=None, a new ax (and fig) is created.
ylog : boolean, default=False
If True, plot logarithm of histogram values.
n_bins : int, default=50
Number of bins the histogram uses.
outfile : str, default=None
If not None, saves plot to this file.
ignore_dim_warning : boolean, default=False
        Enable plotting for more than 50 dimensions (at your own risk).
**kwargs: kwargs passed to pyplot.fill_between. See the doc of pyplot for options.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
        The ax in which the histograms were plotted.
"""
if not isinstance(xyzall, _np.ndarray):
        raise ValueError('Input data has to be a numpy array. Did you concatenate your data?')
if xyzall.shape[1] > 50 and not ignore_dim_warning:
        raise RuntimeError('This function is only useful for fewer than 50 dimensions. Turn off this warning '
                           'at your own risk with ignore_dim_warning=True.')
if feature_labels is not None:
if not isinstance(feature_labels, list):
from pyemma.coordinates.data.featurization.featurizer import MDFeaturizer as _MDFeaturizer
if isinstance(feature_labels, _MDFeaturizer):
feature_labels = feature_labels.describe()
else:
raise ValueError('feature_labels must be a list of feature labels, '
'a pyemma featurizer object or None.')
if not xyzall.shape[1] == len(feature_labels):
raise ValueError('feature_labels must have the same dimension as the input data xyzall.')
# make nice plots if user does not decide on color and transparency
if 'color' not in kwargs.keys():
kwargs['color'] = 'b'
if 'alpha' not in kwargs.keys():
kwargs['alpha'] = .25
import matplotlib.pyplot as _plt
# check input
if ax is None:
fig, ax = _plt.subplots()
else:
fig = ax.get_figure()
hist_offset = -.2
for h, coordinate in enumerate(reversed(xyzall.T)):
hist, edges = _np.histogram(coordinate, bins=n_bins)
if not ylog:
y = hist / hist.max()
else:
y = _np.zeros_like(hist) + _np.NaN
pos_idx = hist > 0
y[pos_idx] = _np.log(hist[pos_idx]) / _np.log(hist[pos_idx]).max()
ax.fill_between(edges[:-1], y + h + hist_offset, y2=h + hist_offset, **kwargs)
ax.axhline(y=h + hist_offset, xmin=0, xmax=1, color='k', linewidth=.2)
ax.set_ylim(hist_offset, h + hist_offset + 1)
# formatting
if feature_labels is None:
feature_labels = [str(n) for n in range(xyzall.shape[1])]
ax.set_ylabel('Feature histograms')
ax.set_yticks(_np.array(range(len(feature_labels))) + .3)
ax.set_yticklabels(feature_labels[::-1])
ax.set_xlabel('Feature values')
# save
if outfile is not None:
fig.savefig(outfile)
return fig, ax | 0.002598 |
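A short usage sketch with random data; the feature names are illustrative:

import numpy as np

data = np.random.randn(1000, 3)
fig, ax = plot_feature_histograms(data, feature_labels=['phi', 'psi', 'chi1'])
fig.savefig('feature_histograms.png')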
def getbalance(self, address: str) -> Decimal:
'''Returns current balance of given address.'''
try:
return Decimal(cast(float, self.ext_fetch('getbalance/' + address)))
except TypeError:
return Decimal(0) | 0.011858 |
def install_from_rpm_py_package(self):
"""Run install from RPM Python binding RPM package."""
self._download_and_extract_rpm_py_package()
# Find ./usr/lib64/pythonN.N/site-packages/rpm directory.
        # A binary built with the same Python version as the running Python
        # is the target for a safe installation.
if self.rpm.has_set_up_py_in():
# If RPM has setup.py.in, this strict check is okay.
# Because we can still install from the source.
py_dir_name = 'python{0}.{1}'.format(
sys.version_info[0], sys.version_info[1])
else:
# If RPM does not have setup.py.in such as CentOS6,
# Only way to install is by different Python's RPM package.
py_dir_name = '*'
python_lib_dir_pattern = os.path.join(
'usr', '*', py_dir_name, 'site-packages')
rpm_dir_pattern = os.path.join(python_lib_dir_pattern, 'rpm')
downloaded_rpm_dirs = glob.glob(rpm_dir_pattern)
if not downloaded_rpm_dirs:
message = 'Directory with a pattern: {0} not found.'.format(
rpm_dir_pattern)
raise RpmPyPackageNotFoundError(message)
src_rpm_dir = downloaded_rpm_dirs[0]
        # Remove any previously installed rpm directories.
for rpm_dir in self.python.python_lib_rpm_dirs:
if os.path.isdir(rpm_dir):
Log.debug("Remove existing rpm directory {0}".format(rpm_dir))
shutil.rmtree(rpm_dir)
dst_rpm_dir = self.python.python_lib_rpm_dir
Log.debug("Copy directory from '{0}' to '{1}'".format(
src_rpm_dir, dst_rpm_dir))
shutil.copytree(src_rpm_dir, dst_rpm_dir)
file_name_pattern = 'rpm-*.egg-info'
rpm_egg_info_pattern = os.path.join(
python_lib_dir_pattern, file_name_pattern)
downloaded_rpm_egg_infos = glob.glob(rpm_egg_info_pattern)
if downloaded_rpm_egg_infos:
existing_rpm_egg_info_pattern = os.path.join(
self.python.python_lib_dir, file_name_pattern)
existing_rpm_egg_infos = glob.glob(existing_rpm_egg_info_pattern)
for existing_rpm_egg_info in existing_rpm_egg_infos:
Log.debug("Remove existing rpm egg info file '{0}'".format(
existing_rpm_egg_info))
os.remove(existing_rpm_egg_info)
Log.debug("Copy file from '{0}' to '{1}'".format(
downloaded_rpm_egg_infos[0], self.python.python_lib_dir))
shutil.copy2(downloaded_rpm_egg_infos[0],
self.python.python_lib_dir) | 0.000729 |
def imshow(*imgs, **options):
"""
Plots multiple images using matplotlib
by dynamically finding the required number of rows and cols.
:param imgs: Images as any number of arguments
:param options: Dict of options
- cmap: Color map for gray scale images
- vmin: Minimum value to be used in color map
- vmax: Maximum value to be used in color map
"""
n = len(imgs)
nrows = int(math.ceil(math.sqrt(n)))
ncols = int(math.ceil(n / nrows))
for row in range(nrows):
for col in range(ncols):
i = row * ncols + col
if i >= n:
break
plt.subplot(nrows, ncols, i+1)
show_img(imgs[i], options)
plt.show() | 0.001361 |
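Usage sketch: display two arrays side by side; cmap/vmin/vmax are forwarded through the options dict:

import numpy as np

a = np.random.rand(64, 64)
imshow(a, a.T, cmap='gray', vmin=0.0, vmax=1.0)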
def set_metadata(self, obj, metadata, clear=False, prefix=None):
"""
Accepts a dictionary of metadata key/value pairs and updates the
specified object metadata with them.
If 'clear' is True, any existing metadata is deleted and only the
passed metadata is retained. Otherwise, the values passed here update
the object's metadata.
By default, the standard object metadata prefix ('X-Object-Meta-') is
prepended to the header name if it isn't present. For non-standard
headers, you must include a non-None prefix, such as an empty string.
"""
# Add the metadata prefix, if needed.
if prefix is None:
prefix = OBJECT_META_PREFIX
massaged = _massage_metakeys(metadata, prefix)
cname = utils.get_name(self.container)
oname = utils.get_name(obj)
new_meta = {}
# Note that the API for object POST is the opposite of that for
# container POST: for objects, all current metadata is deleted,
# whereas for containers you need to set the values to an empty
# string to delete them.
if not clear:
obj_meta = self.get_metadata(obj, prefix=prefix)
new_meta = _massage_metakeys(obj_meta, prefix)
utils.case_insensitive_update(new_meta, massaged)
# Remove any empty values, since the object metadata API will
# store them.
to_pop = []
for key, val in six.iteritems(new_meta):
if not val:
to_pop.append(key)
for key in to_pop:
new_meta.pop(key)
uri = "/%s/%s" % (cname, oname)
resp, resp_body = self.api.method_post(uri, headers=new_meta) | 0.001155 |
def closePanel(self):
"""
Closes a full view panel.
"""
# make sure we can close all the widgets in the view first
for i in range(self.count()):
if not self.widget(i).canClose():
return False
container = self.parentWidget()
viewWidget = self.viewWidget()
# close all the child views
for i in xrange(self.count() - 1, -1, -1):
self.widget(i).close()
self.tabBar().clear()
if isinstance(container, XSplitter):
parent_container = container.parentWidget()
if container.count() == 2:
if isinstance(parent_container, XSplitter):
sizes = parent_container.sizes()
widget = container.widget(int(not container.indexOf(self)))
index = parent_container.indexOf(container)
parent_container.insertWidget(index, widget)
container.setParent(None)
container.close()
container.deleteLater()
parent_container.setSizes(sizes)
elif parent_container.parentWidget() == viewWidget:
widget = container.widget(int(not container.indexOf(self)))
widget.setParent(viewWidget)
if projexui.QT_WRAPPER == 'PySide':
_ = viewWidget.takeWidget()
else:
old_widget = viewWidget.widget()
old_widget.setParent(None)
old_widget.close()
old_widget.deleteLater()
QtGui.QApplication.instance().processEvents()
viewWidget.setWidget(widget)
else:
container.setParent(None)
container.close()
container.deleteLater()
else:
self.setFocus()
self._hintLabel.setText(self.hint())
self._hintLabel.show()
return True | 0.003678 |
def add(self, arg1, arg2=None, arg3=None, bucket_type=None):
"""
Start assembling a Map/Reduce operation. A shortcut for
:func:`RiakMapReduce.add`.
:param arg1: the object or bucket to add
:type arg1: RiakObject, string
:param arg2: a key or list of keys to add (if a bucket is
given in arg1)
:type arg2: string, list, None
:param arg3: key data for this input (must be convertible to JSON)
:type arg3: string, list, dict, None
:param bucket_type: Optional name of a bucket type
:type bucket_type: string, None
:rtype: :class:`RiakMapReduce`
"""
mr = RiakMapReduce(self)
return mr.add(arg1, arg2, arg3, bucket_type) | 0.002681 |
def loadcache(json_file):
"""
    Loads a json cache file and returns its contents as a dictionary (the result is later fed to monkeycache)
"""
f = open(json_file, 'r')
data = f.read()
f.close()
try:
apicache = json.loads(data)
except ValueError as e:
print("Error processing json:", json_file, e)
return {}
return apicache | 0.002899 |
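A quick round-trip sketch of the loader, assuming json is imported as in the snippet; the file content is made up for illustration.
import json
import tempfile

# Write a tiny cache file, then load it back.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump({'count': 1, 'listApis': {}}, tmp)
cache = loadcache(tmp.name)
print(cache['count'])  # -> 1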
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
global_scope_index = 1 if self._in_doctest() else 0
global_scope = self.scopeStack[global_scope_index]
# Ignore 'global' statement in global scope.
if self.scope is not global_scope:
# One 'global' statement can bind multiple (comma-delimited) names.
for node_name in node.names:
node_value = Assignment(node_name, node)
# Remove UndefinedName messages already reported for this name.
# TODO: if the global is not used in this scope, it does not
# become a globally defined name. See test_unused_global.
self.messages = [
m for m in self.messages if not
isinstance(m, messages.UndefinedName) or
m.message_args[0] != node_name]
# Bind name to global scope if it doesn't exist already.
global_scope.setdefault(node_name, node_value)
# Bind name to non-global scopes, but as already "used".
node_value.used = (global_scope, node)
for scope in self.scopeStack[global_scope_index + 1:]:
scope[node_name] = node_value | 0.001533 |
def _from_dict(cls, _dict):
"""Initialize a LanguageModels object from a json dictionary."""
args = {}
if 'customizations' in _dict:
args['customizations'] = [
LanguageModel._from_dict(x)
for x in (_dict.get('customizations'))
]
else:
raise ValueError(
'Required property \'customizations\' not present in LanguageModels JSON'
)
return cls(**args) | 0.006211 |
def update_buttons(self):
"""Updates the enable status of delete and reset buttons."""
current_scheme = self.current_scheme
names = self.get_option("names")
try:
names.pop(names.index(u'Custom'))
except ValueError:
pass
delete_enabled = current_scheme not in names
self.delete_button.setEnabled(delete_enabled)
self.reset_button.setEnabled(not delete_enabled) | 0.004474 |
def create(self):
"""
Create the local repository (if it doesn't already exist).
:returns: :data:`True` if the local repository was just created,
:data:`False` if it already existed.
What :func:`create()` does depends on the situation:
- When :attr:`exists` is :data:`True` nothing is done.
- When the :attr:`local` repository doesn't exist but a :attr:`remote`
repository location is given, a clone of the remote repository is
created.
- When the :attr:`local` repository doesn't exist and no :attr:`remote`
repository has been specified then a new local repository will be
created.
When :func:`create()` is responsible for creating the :attr:`local`
repository it will make sure the :attr:`bare` option is respected.
"""
if self.exists:
logger.debug("Local %s repository (%s) already exists, ignoring request to create it.",
self.friendly_name, format_path(self.local))
return False
else:
timer = Timer()
if self.remote:
logger.info("Creating local %s repository (%s) by cloning %s ..",
self.friendly_name, format_path(self.local), self.remote)
else:
logger.info("Creating local %s repository (%s) ..",
self.friendly_name, format_path(self.local))
self.context.execute(*self.get_create_command())
logger.debug("Took %s to %s local %s repository.",
timer, "clone" if self.remote else "create",
self.friendly_name)
if self.remote:
self.mark_updated()
# Ensure that all further commands are executed in the local repository.
self.update_context()
return True | 0.003115 |
def get_scale(self):
"""
If exposure was not set in the __init__, get the exposure associated
with this RawImage so that it may be used in other
:class:`~peri.util.RawImage`. This is useful for transferring exposure
parameters to a series of images.
Returns
-------
exposure : tuple of floats
The (emin, emax) which get mapped to (0, 1)
"""
if self.exposure is not None:
return self.exposure
raw = initializers.load_tiff(self.filename)
return raw.min(), raw.max() | 0.003419 |
async def set_ignore_version(request):
"""
This handler expects a POST request of form application/json.
The request body should be formatted as:
{"version": version_ignored}
The POST will 400 in the following scenarios:
1. Sending an empty dict
2. Sending a dict with an empty string
"""
data = await request.json()
if 'version' in data.keys():
ignored_version = data.get('version')
log.debug('Set Ignore Version to {}'.format(ignored_version))
if ignored_version == '':
status = 400
res = {'version': None}
else:
_set_ignored_version(ignored_version)
status = 200
res = {'version': ignored_version}
else:
status = 400
res = {'version': None}
return web.json_response(res, status=status) | 0.001181 |
def revisions(
request, slug, template_name='wakawaka/revisions.html', extra_context=None
):
"""
Displays the list of all revisions for a specific WikiPage
"""
queryset = WikiPage.objects.all()
page = get_object_or_404(queryset, slug=slug)
template_context = {'page': page}
template_context.update(extra_context or {})
return render(request, template_name, template_context) | 0.002433 |
def apply_mask_4d(image, mask_img): # , smooth_mm=None, remove_nans=True):
"""Read a Nifti file nii_file and a mask Nifti file.
Extract the signals in nii_file that are within the mask, the mask indices
and the mask shape.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
mask_img: img-like object or boyle.nifti.NeuroImage or str
3D mask array: True where a voxel should be used.
See img description.
smooth_mm: float #TBD
(optional) The size in mm of the FWHM Gaussian kernel to smooth the signal.
If True, remove_nans is True.
remove_nans: bool #TBD
If remove_nans is True (default), the non-finite values (NaNs and
infs) found in the images will be replaced by zeros.
Returns
-------
session_series, mask_data
session_series: numpy.ndarray
2D array of series with shape (voxel number, image number)
Note
----
nii_file and mask_file must have the same shape.
Raises
------
FileNotFound, NiftiFilesNotCompatible
"""
img = check_img(image)
mask = check_img(mask_img)
check_img_compatibility(img, mask, only_check_3d=True)
vol = get_data(img)
series, mask_data = _apply_mask_to_4d_data(vol, mask)
return series, mask_data | 0.002367 |
def pkcs12_key_as_pem(private_key_bytes, private_key_password):
"""Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.
Args:
private_key_bytes: Bytes. PKCS#12 key in DER format.
private_key_password: String. Password for PKCS#12 key.
Returns:
String. PEM contents of ``private_key_bytes``.
"""
private_key_password = _helpers._to_bytes(private_key_password)
pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)
return crypto.dump_privatekey(crypto.FILETYPE_PEM,
pkcs12.get_privatekey()) | 0.001667 |
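An illustrative call, e.g. for a service-account .p12 file; the path and password below are placeholders.
with open('service_account.p12', 'rb') as f:
    pem_key = pkcs12_key_as_pem(f.read(), 'notasecret')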
def get_compositions_by_search(self, composition_query, composition_search):
"""Gets the search results matching the given search query using the given search.
arg: composition_query (osid.repository.CompositionQuery):
the composition query
arg: composition_search (osid.repository.CompositionSearch):
the composition search
return: (osid.repository.CompositionSearchResults) - the
composition search results
raise: NullArgument - ``composition_query`` or
``composition_search`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``composition_query`` or
``composition_search`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceSearchSession.get_resources_by_search_template
# Copied from osid.resource.ResourceQuerySession.get_resources_by_query_template
and_list = list()
or_list = list()
for term in composition_query._query_terms:
and_list.append({term: composition_query._query_terms[term]})
for term in composition_query._keyword_terms:
or_list.append({term: composition_query._keyword_terms[term]})
if composition_search._id_list is not None:
identifiers = [ObjectId(i.identifier) for i in composition_search._id_list]
and_list.append({'_id': {'$in': identifiers}})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('repository',
collection='Composition',
runtime=self._runtime)
if composition_search.start is not None and composition_search.end is not None:
result = collection.find(query_terms)[composition_search.start:composition_search.end]
else:
result = collection.find(query_terms)
return searches.CompositionSearchResults(result, dict(composition_query._query_terms), runtime=self._runtime) | 0.003299 |
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
"""Create a definition dictionary for the TELEX input method
Args:
w_shorthand (optional): allow a stand-alone w to be
interpreted as an ư. Default to True.
brackets_shorthand (optional, True): allow typing ][ as
shorthand for ươ. Default to True.
Returns a dictionary to be passed into process_key().
"""
telex = {
"a": "a^",
"o": "o^",
"e": "e^",
"w": ["u*", "o*", "a+"],
"d": "d-",
"f": "\\",
"s": "/",
"r": "?",
"x": "~",
"j": ".",
}
if w_shorthand:
telex["w"].append('<ư')
if brackets_shorthand:
telex.update({
"]": "<ư",
"[": "<ơ",
"}": "<Ư",
"{": "<Ơ"
})
return telex | 0.001142 |
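The returned mapping can be inspected directly; the process_key call mentioned in the docstring lives elsewhere in the package, so this sketch only shows the definition itself.
telex = get_telex_definition()
print(telex['a'])    # 'a^'
print(telex['w'])    # ['u*', 'o*', 'a+', '<ư']  (w shorthand enabled by default)
print('[' in telex)  # True, because brackets_shorthand defaults to True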
def remove_stage_from_deployed_values(key, filename):
# type: (str, str) -> None
"""Delete a top level key from the deployed JSON file."""
final_values = {} # type: Dict[str, Any]
try:
with open(filename, 'r') as f:
final_values = json.load(f)
except IOError:
        # If there is no file to delete from, then this function is a no-op.
return
try:
del final_values[key]
with open(filename, 'wb') as f:
data = serialize_to_json(final_values)
f.write(data.encode('utf-8'))
except KeyError:
# If they key didn't exist then there is nothing to remove.
pass | 0.001502 |
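A small self-contained sketch of the behaviour, assuming serialize_to_json is essentially json.dumps; the stage names and file path are invented.
import json

with open('deployed.json', 'w') as f:
    json.dump({'dev': {'region': 'us-west-2'}, 'prod': {}}, f)

remove_stage_from_deployed_values('dev', 'deployed.json')

with open('deployed.json') as f:
    print(json.load(f))  # {'prod': {}} -- the 'dev' entry is gone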
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer) | 0.003063 |
def _coarsegrain_space(coarse_grain, is_cut, system):
"""Spatially coarse-grain the TPM and CM."""
tpm = coarse_grain.macro_tpm(
system.tpm, check_independence=(not is_cut))
node_indices = coarse_grain.macro_indices
state = coarse_grain.macro_state(system.state)
# Universal connectivity, for now.
n = len(node_indices)
cm = np.ones((n, n))
return SystemAttrs(tpm, cm, node_indices, state) | 0.004274 |
def fit(self, X, y=None, **kwargs):
"""Fit the underlying estimator.
Parameters
----------
X, y : array-like
**kwargs
Additional fit-kwargs for the underlying estimator.
Returns
-------
self : object
"""
logger.info("Starting fit")
with _timer("fit", _logger=logger):
result = self.estimator.fit(X, y, **kwargs)
# Copy over learned attributes
copy_learned_attributes(result, self)
copy_learned_attributes(result, self.estimator)
return self | 0.003407 |
def _getFileNumber(self, filename):
"""
Given a file name, get its file number (if any).
@param filename: A C{str} file name.
@return: An C{int} file number or C{None} if no file with that name
has been added.
"""
cur = self._connection.cursor()
cur.execute('SELECT id FROM files WHERE name = ?', (filename,))
row = cur.fetchone()
if row is None:
return None
else:
return row[0] | 0.00404 |
def titleOf(self, url):
"""
Returns the title for the inputed url.
:param url | <str>
:return <str>
"""
for m_url, m_title in self._stack:
if url == m_url:
return m_title
return nativestring(url).split('/')[-1] | 0.01462 |
def close(self):
"""Close the http/https connect."""
try:
self.response.close()
self.logger.debug("close connect succeed.")
except Exception as e:
self.unknown("close connect error: %s" % e) | 0.008 |
def feedback(self):
"""
Access the feedback
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackList
"""
if self._feedback is None:
self._feedback = FeedbackList(
self._version,
account_sid=self._solution['account_sid'],
message_sid=self._solution['sid'],
)
return self._feedback | 0.00409 |
def encode(self, obj):
""" Add the given object to the result.
"""
if isinstance(obj, int_like_types):
self.result.append("i%de" % obj)
elif isinstance(obj, string_types):
self.result.extend([str(len(obj)), ':', str(obj)])
elif hasattr(obj, "__bencode__"):
self.encode(obj.__bencode__())
elif hasattr(obj, "items"):
# Dictionary
self.result.append('d')
for key, val in sorted(obj.items()):
key = str(key)
self.result.extend([str(len(key)), ':', key])
self.encode(val)
self.result.append('e')
else:
# Treat as iterable
try:
items = iter(obj)
except TypeError as exc:
raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % (
obj, type(obj), exc
))
else:
self.result.append('l')
for item in items:
self.encode(item)
self.result.append('e')
return self.result | 0.002597 |
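A worked example of the rules implemented above, assuming the surrounding class (called Encoder here, which is an assumption) initializes self.result to an empty list: dicts become 'd' + sorted key/value pairs + 'e', integers 'i<n>e', and strings '<len>:<text>'.
encoder = Encoder()  # assumed wrapper holding self.result = []
parts = encoder.encode({'foo': 42, 'bar': ['spam']})
print(''.join(parts))  # d3:barl4:spame3:fooi42ee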
def _annotated_unpack_infer(stmt, context=None):
"""
Recursively generate nodes inferred by the given statement.
If the inferred value is a list or a tuple, recurse on the elements.
Returns an iterator which yields tuples in the format
('original node', 'infered node').
"""
if isinstance(stmt, (astroid.List, astroid.Tuple)):
for elt in stmt.elts:
inferred = utils.safe_infer(elt)
if inferred and inferred is not astroid.Uninferable:
yield elt, inferred
return
for infered in stmt.infer(context):
if infered is astroid.Uninferable:
continue
yield stmt, infered | 0.001477 |
def random_val(index, tune_params):
"""return a random value for a parameter"""
key = list(tune_params.keys())[index]
return random.choice(tune_params[key]) | 0.005952 |
def do_update(pool,request,models):
"unlike *_check() below, update doesn't worry about missing children"
return {k:fkapply(models,pool,process_update,missing_update,k,v) for k,v in request.items()} | 0.064356 |
def get_address(self, account_id, address_id, **params):
"""https://developers.coinbase.com/api/v2#show-addresss"""
response = self._get('v2', 'accounts', account_id, 'addresses', address_id, params=params)
return self._make_api_object(response, Address) | 0.010791 |
def from_tuples(cls, tups):
"""
Create a new IntervalTree from an iterable of 2- or 3-tuples,
where the tuple lists begin, end, and optionally data.
"""
ivs = [Interval(*t) for t in tups]
return IntervalTree(ivs) | 0.007663 |
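Usage is straightforward; a minimal sketch with invented intervals:
tree = IntervalTree.from_tuples([(1, 5), (4, 10), (12, 20, 'payload')])
print(len(tree))     # 3
print(sorted(tree))  # intervals ordered by begin, the last one carrying 'payload' as data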
def install_webpi(name, install_args=None, override_args=False):
'''
Instructs Chocolatey to install a package via the Microsoft Web PI service.
name
The name of the package to be installed. Only accepts a single argument.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_webpi <package name>
salt '*' chocolatey.install_webpi <package name> install_args=<args> override_args=True
'''
return install(name,
source='webpi',
install_args=install_args,
override_args=override_args) | 0.003953 |
def set_admin(msg, handler):
"""Handle admin verification responses from NickServ.
| If NickServ tells us that the nick is authed, mark it as verified.
"""
if handler.config['feature']['servicestype'] == "ircservices":
match = re.match("STATUS (.*) ([0-3])", msg)
elif handler.config['feature']['servicestype'] == "atheme":
match = re.match("(.*) ACC ([0-3])", msg)
if match:
status = int(match.group(2))
nick = match.group(1)
if status != 3:
return
with handler.db.session_scope() as session:
admin = session.query(Permissions).filter(Permissions.nick == nick).first()
if admin is None:
session.add(Permissions(nick=nick, role='admin', registered=True, time=datetime.now()))
else:
admin.registered = True
admin.time = datetime.now() | 0.003322 |
def split_results(self):
"""
Convenience method to separate failed and successful results.
.. versionadded:: 2.0.0
This function will split the results of the failed operation
(see :attr:`.all_results`) into "good" and "bad" dictionaries.
The intent is for the application to handle any successful
results in a success code path, and handle any failed results
in a "retry" code path. For example
.. code-block:: python
try:
cb.add_multi(docs)
except CouchbaseTransientError as e:
# Temporary failure or server OOM
_, fail = e.split_results()
# Sleep for a bit to reduce the load on the server
time.sleep(0.5)
# Try to add only the failed results again
cb.add_multi(fail)
Of course, in the example above, the second retry may fail as
well, and a more robust implementation is left as an exercise
to the reader.
:return: A tuple of ( `ok`, `bad` ) dictionaries.
"""
ret_ok, ret_fail = {}, {}
count = 0
nokey_prefix = ([""] + sorted(filter(bool, self.all_results.keys())))[-1]
for key, v in self.all_results.items():
if not key:
key = nokey_prefix + ":nokey:" + str(count)
count += 1
success = getattr(v,'success', True)
if success:
ret_ok[key] = v
else:
ret_fail[key] = v
return ret_ok, ret_fail | 0.002494 |
def plot_samples(
ax,
sampler,
modelidx=0,
sed=True,
n_samples=100,
e_unit=u.eV,
e_range=None,
e_npoints=100,
threads=None,
label=None,
last_step=False,
):
"""Plot a number of samples from the sampler chain.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
n_samples : int, optional
Number of samples to plot. Default is 100.
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_range : list of `~astropy.units.Quantity`, length 2, optional
Limits in energy for the computation of the model samples and ML model.
Note that setting this parameter will mean that the samples for the
model are recomputed and depending on the model speed might be quite
slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
        Whether to only use the positions in the final step of the run (True)
        or the whole chain (False, default).
"""
modelx, model = _read_or_calc_samples(
sampler,
modelidx,
last_step=last_step,
e_range=e_range,
e_npoints=e_npoints,
threads=threads,
)
# pick first model sample for units
f_unit, sedf = sed_conversion(modelx, model[0].unit, sed)
sample_alpha = min(5.0 / n_samples, 0.5)
for my in model[np.random.randint(len(model), size=n_samples)]:
ax.loglog(
modelx.to(e_unit).value,
(my * sedf).to(f_unit).value,
color=(0.1,) * 3,
alpha=sample_alpha,
lw=1.0,
)
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel(
"{0} [{1}]".format(label, f_unit.to_string("latex_inline"))
) | 0.000423 |
def update_subscription(netid, action, subscription_code, data_field=None):
"""
Post a subscription action for the given netid and subscription_code
"""
url = '{0}/subscription.json'.format(url_version())
action_list = []
if isinstance(subscription_code, list):
for code in subscription_code:
action_list.append(_set_action(
netid, action, code, data_field))
else:
action_list.append(_set_action(
netid, action, subscription_code, data_field))
body = {'actionList': action_list}
response = post_resource(url, json.dumps(body))
return _json_to_subscription_post_response(response) | 0.001477 |
def argparse_funckw(func, defaults={}, **kwargs):
"""
allows kwargs to be specified on the commandline from testfuncs
Args:
func (function):
Kwargs:
lbl, verbose, only_specified, force_keys, type_hint, alias_dict
Returns:
dict: funckw
CommandLine:
python -m utool.util_inspect argparse_funckw
SeeAlso:
exec_funckw
recursive_parse_kwargs
parse_kwarg_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> func = get_instance_attrnames
>>> funckw = argparse_funckw(func)
>>> result = ('funckw = %s' % (ut.repr3(funckw),))
>>> print(result)
funckw = {
'default': True,
'with_methods': True,
'with_properties': True,
}
"""
import utool as ut
funckw_ = ut.get_funckw(func, recursive=True)
funckw_.update(defaults)
funckw = ut.argparse_dict(funckw_, **kwargs)
return funckw | 0.000957 |
def breadcrumb(self):
"""List of ``(url, title)`` tuples defining the current breadcrumb
path.
"""
if self.path == '.':
return []
path = self.path
breadcrumb = [((self.url_ext or '.'), self.title)]
while True:
path = os.path.normpath(os.path.join(path, '..'))
if path == '.':
break
url = (url_from_path(os.path.relpath(path, self.path)) + '/' +
self.url_ext)
breadcrumb.append((url, self.gallery.albums[path].title))
breadcrumb.reverse()
return breadcrumb | 0.003195 |
def broadcast_tx(cls, tx_hex): # pragma: no cover
"""Broadcasts a transaction to the blockchain.
:param tx_hex: A signed transaction in hex form.
:type tx_hex: ``str``
:raises ConnectionError: If all API services fail.
"""
success = None
for api_call in cls.BROADCAST_TX_MAIN:
try:
success = api_call(tx_hex)
if not success:
continue
return
except cls.IGNORED_ERRORS:
pass
if success is False:
raise ConnectionError('Transaction broadcast failed, or '
'Unspents were already used.')
raise ConnectionError('All APIs are unreachable.') | 0.002618 |
def plot_spectra(self, nmax, convention='power', unit='per_l', base=10.,
maxcolumns=3, xscale='lin', yscale='log', grid=True,
xlim=(None, None), ylim=(None, None), show=True,
title=True, axes_labelsize=None, tick_labelsize=None,
title_labelsize=None, ax=None, fname=None):
"""
Plot the spectra of the best-concentrated Slepian functions.
Usage
-----
x.plot_spectra(nmax, [convention, unit, base, maxcolumns, xscale,
yscale, grid, xlim, ylim, show, title,
axes_labelsize, tick_labelsize, title_labelsize,
ax, fname])
Parameters
----------
nmax : int
The number of Slepian functions to plot.
convention : str, optional, default = 'power'
The type of spectra to plot: 'power' for power spectrum, and
'energy' for energy spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
maxcolumns : int, optional, default = 3
The maximum number of columns to use when plotting the spectra
of multiple localization windows.
xscale : str, optional, default = 'lin'
Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'log'
Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
xlim : tuple, optional, default = (None, None)
The upper and lower limits used for the x axis.
ylim : tuple, optional, default = (None, None)
The lower and upper limits used for the y axis.
show : bool, optional, default = True
If True, plot the image to the screen.
title : bool, optional, default = True
If True, plot a legend on top of each subplot providing the taper
number and 1 minus the concentration factor.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
title_labelsize : int, optional, default = None
The font size for the subplot titles.
ax : matplotlib axes object, optional, default = None
An array of matplotlib axes objects where the plots will appear.
fname : str, optional, default = None
If present, save the image to the file.
"""
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams['axes.labelsize']
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams['xtick.labelsize']
if title_labelsize is None:
title_labelsize = _mpl.rcParams['axes.titlesize']
degrees = self.degrees()
spectrum = self.spectra(nmax=nmax, convention=convention, unit=unit,
base=base)
ncolumns = min(maxcolumns, nmax)
nrows = _np.ceil(nmax / ncolumns).astype(int)
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0]
* 0.7 * nrows / ncolumns + 0.41)
if ax is None:
fig, axes = _plt.subplots(nrows, ncolumns, figsize=figsize,
sharex='all', sharey='all')
else:
if hasattr(ax, 'flatten') and ax.size < nmax:
raise ValueError('ax.size must be greater or equal to nmax. ' +
'nmax = {:s}'.format(repr(nmax)) +
' and ax.size = {:s}.'.format(repr(ax.size)))
axes = ax
if ax is None:
if nrows > 1:
for axtemp in axes[:-1, :].flatten():
for xlabel_i in axtemp.get_xticklabels():
xlabel_i.set_visible(False)
axtemp.set_xlabel('', visible=False)
for axtemp in axes[:, 1:].flatten():
for ylabel_i in axtemp.get_yticklabels():
ylabel_i.set_visible(False)
axtemp.set_ylabel('', visible=False)
elif nmax > 1:
for axtemp in axes[1:].flatten():
for ylabel_i in axtemp.get_yticklabels():
ylabel_i.set_visible(False)
axtemp.set_ylabel('', visible=False)
if ylim == (None, None):
upper = spectrum[:, :min(self.nmax, nmax)].max()
lower = upper * 1.e-6
ylim = (lower, 5 * upper)
if xlim == (None, None):
if xscale == 'lin':
xlim = (degrees[0], degrees[-1])
for alpha in range(min(self.nmax, nmax)):
evalue = self.eigenvalues[alpha]
if min(self.nmax, nmax) == 1 and ax is None:
axtemp = axes
elif hasattr(axes, 'flatten'):
axtemp = axes.flatten()[alpha]
else:
axtemp = axes[alpha]
if (convention == 'power'):
axtemp.set_ylabel('Power', fontsize=axes_labelsize)
else:
axtemp.set_ylabel('Energy', fontsize=axes_labelsize)
if yscale == 'log':
axtemp.set_yscale('log', basey=base)
if xscale == 'log':
axtemp.set_xscale('log', basex=base)
axtemp.plot(degrees[1:], spectrum[1:, alpha],
label='#{:d} [loss={:2.2g}]'
.format(alpha, 1-evalue))
else:
axtemp.plot(degrees[0:], spectrum[0:, alpha],
label='#{:d} [loss={:2.2g}]'
.format(alpha, 1-evalue))
axtemp.set_xlabel('Spherical harmonic degree',
fontsize=axes_labelsize)
axtemp.set(xlim=xlim, ylim=ylim)
axtemp.minorticks_on()
axtemp.grid(grid, which='major')
axtemp.tick_params(labelsize=tick_labelsize)
if title is True:
axtemp.set_title('#{:d} [loss={:2.2g}]'
.format(alpha, 1-evalue),
fontsize=title_labelsize)
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes | 0.000848 |
def keyPress(self, key):
""" Send a key press to the server
key: string: either [a-z] or a from KEYMAP
"""
log.debug('keyPress %s', key)
self.keyDown(key)
self.keyUp(key)
return self | 0.008197 |
def check_recursion_depth(self):
"""Check recursion depth, raise AsyncRecursionError if too deep."""
from furious.async import MAX_DEPTH
recursion_options = self._options.get('_recursion', {})
max_depth = recursion_options.get('max', MAX_DEPTH)
# Check if recursion check has been disabled, then check depth.
if (max_depth != DISABLE_RECURSION_CHECK and
self.recursion_depth > max_depth):
raise errors.AsyncRecursionError('Max recursion depth reached.') | 0.00565 |
def _add_item(self, dim_vals, data, sort=True, update=True):
"""
Adds item to the data, applying dimension types and ensuring
key conforms to Dimension type and values.
"""
sort = sort and self.sort
if not isinstance(dim_vals, tuple):
dim_vals = (dim_vals,)
self._item_check(dim_vals, data)
# Apply dimension types
dim_types = zip([kd.type for kd in self.kdims], dim_vals)
dim_vals = tuple(v if None in [t, v] else t(v) for t, v in dim_types)
valid_vals = zip(self.kdims, dim_vals)
for dim, val in valid_vals:
if dim.values and val is not None and val not in dim.values:
raise KeyError('%s dimension value %s not in'
' specified dimension values.' % (dim, repr(val)))
# Updates nested data structures rather than simply overriding them.
if (update and (dim_vals in self.data)
and isinstance(self.data[dim_vals], (MultiDimensionalMapping, OrderedDict))):
self.data[dim_vals].update(data)
else:
self.data[dim_vals] = data
if sort:
self._resort() | 0.004184 |
def convert2wkt(self, set3D=True):
"""
export the geometry of each feature as a wkt string
Parameters
----------
set3D: bool
keep the third (height) dimension?
        Returns
        -------
        list of str
            the geometry of each feature as a WKT string
        """
features = self.getfeatures()
for feature in features:
try:
feature.geometry().Set3D(set3D)
except AttributeError:
dim = 3 if set3D else 2
feature.geometry().SetCoordinateDimension(dim)
return [feature.geometry().ExportToWkt() for feature in features] | 0.004878 |
def execute(self, statement, params = ()):
"""
execute sql statement. optionally you can give multiple statements
to save on cursor creation and closure
"""
con = self.__con or self.connection
cur = self.__cur or con.cursor()
        if not isinstance(statement, list):  # we expect to receive instructions as a list
statement = [statement]
params = [params]
for state, param in zip(statement, params):
logger.debug("%s %s" % (state, param))
cur.execute(state, param)
if not self.__con:
con.commit()
cur.close()
self.register_modification() | 0.010101 |
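An illustrative call passing several statements through one cursor; the table and values are invented, and db stands for an instance of the class holding this method.
db.execute(
    ["INSERT INTO items (name) VALUES (?)",
     "DELETE FROM items WHERE name = ?"],
    [("widget",), ("obsolete",)],
)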
def login(self, username, password):
"""
Log into the WS interface and get the authentication token
if login is:
- accepted, returns True
- refused, returns False
In case of any error, raises a BackendException
:param username: login name
:type username: str
:param password: password
:type password: str
:return: return True if authentication is successfull, otherwise False
:rtype: bool
"""
logger.debug("login for: %s", username)
# Configured as not authenticated WS
if not username and not password:
self.set_token(token=None)
return False
if not username or not password:
logger.error("Username or password cannot be None!")
self.set_token(token=None)
return False
endpoint = 'login'
json = {'username': username, 'password': password}
response = self.get_response(method='POST', endpoint=endpoint, json=json)
if response.status_code == 401:
logger.error("Access denied to %s", self.url_endpoint_root)
self.set_token(token=None)
return False
resp = self.decode(response=response)
if 'token' in resp:
self.set_token(token=resp['token'])
return True
return False | 0.002015 |
def compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time):
""" Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for final steps """
# Check that diffusion maps is using the correct laplacian, warn otherwise
if lapl_type not in ['geometric', 'renormalized']:
warnings.warn("for correct diffusion maps embedding use laplacian type 'geometric' or 'renormalized'.")
# Step 5 of diffusion maps:
vectors = diffusion_map.copy()
psi = vectors/vectors[:,[0]]
diffusion_times = diffusion_time
if diffusion_time == 0:
lambdas = np.abs(lambdas)
diffusion_times = np.exp(1. - np.log(1 - lambdas[1:])/np.log(lambdas[1:]))
lambdas = lambdas / (1 - lambdas)
else:
lambdas = np.abs(lambdas)
lambdas = lambdas ** float(diffusion_time)
diffusion_map = psi * lambdas
return diffusion_map | 0.006772 |
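For reference, the scaling applied in the last lines corresponds to the usual diffusion-map embedding convention (a sketch of the convention, not part of the original code):
\Psi_t(x_i) = \left(\lambda_1^t \, \psi_1(x_i),\; \lambda_2^t \, \psi_2(x_i),\; \ldots\right), \qquad \psi_k = \phi_k / \phi_0,
where the \phi_k are the eigenvectors held in diffusion_map; when diffusion_time == 0 the code substitutes the multi-scale weighting \lambda / (1 - \lambda) for the fixed power \lambda^t.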
def framework_version_from_tag(image_tag):
"""Extract the framework version from the image tag.
Args:
image_tag (str): Image tag, which should take the form '<framework_version>-<device>-<py_version>'
Returns:
str: The framework version.
"""
tag_pattern = re.compile('^(.*)-(cpu|gpu)-(py2|py3)$')
tag_match = tag_pattern.match(image_tag)
return None if tag_match is None else tag_match.group(1) | 0.004545 |
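A few illustrative calls against the regex above:
print(framework_version_from_tag('1.4.1-gpu-py3'))  # '1.4.1'
print(framework_version_from_tag('0.12-cpu-py2'))   # '0.12'
print(framework_version_from_tag('latest'))         # None -- tag does not match the pattern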
def cmd_delete(args):
"""Deletes a node"""
major = args.get(0)
minor = args.get(1)
if major is not None:
if major in penStore.data:
if minor is None:
if len(penStore.data[major]) > 0:
if raw_input("are you sure (y/n)? ") not in ['y', 'Y', 'yes', 'Yes']:
return ExitStatus.ABORT
penStore.deleteList(major)
puts("list deleted")
elif minor in penStore.data[major]:
penStore.deleteNote(major, minor)
puts("note deleted")
else:
puts("no such note, sorry! (%s)" % minor)
else:
puts("no such list, sorry! (%s)" % major)
else:
print """
- pen: delete help ------------------------------------------------------------
pen delete <list> deletes list and all of its notes
pen delete <list> <note> deletes note
""" | 0.003125 |
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
r = form.save(commit=False)
parent_id = request.REQUEST.get('parent_id', None)
if parent_id:
parent = Folder.objects.get(id=parent_id)
r.parent = parent
return r | 0.004662 |
def upload(self, params={}):
"""start uploading the file until upload is complete or error.
This is the main method to used, If you do not care about
state of process.
Args:
params: a dict object describe video info, eg title,
tags, description, category.
all video params see the doc of prepare_video_params.
Returns:
return video_id if upload successfully
"""
if self.upload_token is not None:
# resume upload
status = self.check()
if status['status'] != 4:
return self.commit()
else:
self.new_slice()
while self.slice_task_id != 0:
self.upload_slice()
return self.commit()
else:
# new upload
self.create(self.prepare_video_params(**params))
self.create_file()
self.new_slice()
while self.slice_task_id != 0:
self.upload_slice()
return self.commit() | 0.001797 |
def update(gandi, fqdn, name, type, value, ttl, file):
"""Update record entry for a domain.
--file option will ignore other parameters and overwrite current zone
content with provided file content.
"""
domains = gandi.dns.list()
domains = [domain['fqdn'] for domain in domains]
if fqdn not in domains:
gandi.echo('Sorry domain %s does not exist' % fqdn)
gandi.echo('Please use one of the following: %s' % ', '.join(domains))
return
content = ''
if file:
content = file.read()
elif not sys.stdin.isatty():
content = click.get_text_stream('stdin').read()
content = content.strip()
if not content and not name and not type and not value:
click.echo('Cannot find parameters for zone content to update.')
return
if name and type and not value:
click.echo('You must provide one or more value parameter.')
return
result = gandi.dns.update_record(fqdn, name, type, value, ttl, content)
gandi.echo(result['message']) | 0.000958 |
def create_conversation(self, body, recipients, attachment_ids=None, context_code=None, filter=None, filter_mode=None, group_conversation=None, media_comment_id=None, media_comment_type=None, mode=None, scope=None, subject=None, user_note=None):
"""
Create a conversation.
Create a new conversation with one or more recipients. If there is already
an existing private conversation with the given recipients, it will be
reused.
"""
path = {}
data = {}
params = {}
# REQUIRED - recipients
"""An array of recipient ids. These may be user ids or course/group ids
prefixed with "course_" or "group_" respectively, e.g.
recipients[]=1&recipients[]=2&recipients[]=course_3"""
data["recipients"] = recipients
# OPTIONAL - subject
"""The subject of the conversation. This is ignored when reusing a
conversation. Maximum length is 255 characters."""
if subject is not None:
data["subject"] = subject
# REQUIRED - body
"""The message to be sent"""
data["body"] = body
# OPTIONAL - group_conversation
"""Defaults to false. If true, this will be a group conversation (i.e. all
recipients may see all messages and replies). If false, individual private
conversations will be started with each recipient. Must be set false if the
number of recipients is over the set maximum (default is 100)."""
if group_conversation is not None:
data["group_conversation"] = group_conversation
# OPTIONAL - attachment_ids
"""An array of attachments ids. These must be files that have been previously
uploaded to the sender's "conversation attachments" folder."""
if attachment_ids is not None:
data["attachment_ids"] = attachment_ids
# OPTIONAL - media_comment_id
"""Media comment id of an audio of video file to be associated with this
message."""
if media_comment_id is not None:
data["media_comment_id"] = media_comment_id
# OPTIONAL - media_comment_type
"""Type of the associated media file"""
if media_comment_type is not None:
self._validate_enum(media_comment_type, ["audio", "video"])
data["media_comment_type"] = media_comment_type
# OPTIONAL - user_note
"""Will add a faculty journal entry for each recipient as long as the user
making the api call has permission, the recipient is a student and
faculty journals are enabled in the account."""
if user_note is not None:
data["user_note"] = user_note
# OPTIONAL - mode
"""Determines whether the messages will be created/sent synchronously or
asynchronously. Defaults to sync, and this option is ignored if this is a
group conversation or there is just one recipient (i.e. it must be a bulk
private message). When sent async, the response will be an empty array
(batch status can be queried via the {api:ConversationsController#batches batches API})"""
if mode is not None:
self._validate_enum(mode, ["sync", "async"])
data["mode"] = mode
# OPTIONAL - scope
"""Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}"""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
data["scope"] = scope
# OPTIONAL - filter
"""Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}"""
if filter is not None:
data["filter"] = filter
# OPTIONAL - filter_mode
"""Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
data["filter_mode"] = filter_mode
# OPTIONAL - context_code
"""The course or group that is the context for this conversation. Same format
as courses or groups in the recipients argument."""
if context_code is not None:
data["context_code"] = context_code
self.logger.debug("POST /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/conversations".format(**path), data=data, params=params, no_data=True) | 0.00413 |
def contians_attribute(self, attribute):
"""
Returns how many cards in the deck have the specified attribute.
This method requires a library to be stored in the deck instance and
        will return 0 if there is no library.
"""
if self.library is None:
return 0
load = self.library.load_card
matches = 0
for code in self.cards:
card = load(code)
if card.has_attribute(attribute):
matches += 1
return matches | 0.003711 |
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(OlsrParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_jsoninfo(e.data) | 0.007813 |
def _fix_unmapped(mapped_file, unmapped_file, data):
"""
The unmapped.bam file up until at least Tophat 2.1.1 is broken in various
ways, see https://github.com/cbrueffer/tophat-recondition for details.
Run TopHat-Recondition to fix these issues.
"""
out_file = os.path.splitext(unmapped_file)[0] + "_fixup.bam"
if file_exists(out_file):
return out_file
assert os.path.dirname(mapped_file) == os.path.dirname(unmapped_file)
cmd = config_utils.get_program("tophat-recondition", data)
cmd += " -q"
tophat_out_dir = os.path.dirname(mapped_file)
tophat_logfile = os.path.join(tophat_out_dir, 'tophat-recondition.log')
with file_transaction(data, tophat_logfile) as tx_logfile:
cmd += ' --logfile %s' % tx_logfile
cmd += " -m %s" % mapped_file
cmd += " -u %s" % unmapped_file
cmd += " %s" % tophat_out_dir
do.run(cmd, "Fixing unmapped reads with Tophat-Recondition.", None)
return out_file | 0.001009 |
def parse_plotFingerprint(self):
"""Find plotFingerprint output. Both --outQualityMetrics and --outRawCounts"""
self.deeptools_plotFingerprintOutQualityMetrics = dict()
for f in self.find_log_files('deeptools/plotFingerprintOutQualityMetrics'):
parsed_data = self.parsePlotFingerprintOutQualityMetrics(f)
for k, v in parsed_data.items():
if k in self.deeptools_plotFingerprintOutQualityMetrics:
log.warning("Replacing duplicate sample {}.".format(k))
# Values are fractions - convert to percentages for consistency with other MultiQC output
self.deeptools_plotFingerprintOutQualityMetrics[k] = { i:float(j)*100.0 for i,j in v.items() }
if len(parsed_data) > 0:
self.add_data_source(f, section='plotFingerprint')
self.deeptools_plotFingerprintOutRawCounts= dict()
for f in self.find_log_files('deeptools/plotFingerprintOutRawCounts'):
parsed_data = self.parsePlotFingerprintOutRawCounts(f)
for k, v in parsed_data.items():
if k in self.deeptools_plotFingerprintOutRawCounts:
log.warning("Replacing duplicate sample {}.".format(k))
self.deeptools_plotFingerprintOutRawCounts[k] = v
if len(parsed_data) > 0:
self.add_data_source(f, section='plotFingerprint')
if len(self.deeptools_plotFingerprintOutRawCounts) > 0:
self.add_section(name="Fingerprint plot",
anchor="deeptools_fingerprint",
description="Signal fingerprint according to plotFingerprint",
plot=linegraph.plot(
self.deeptools_plotFingerprintOutRawCounts,
{
'id': 'deeptools_fingerprint_plot',
'title': 'deepTools: Fingerprint plot',
'xmin': 0.0,
'xmax': 1.0,
'ymin': 0.0,
'ymax': 1.0,
'xlab': 'rank',
'ylab': 'Fraction w.r.t. bin with highest coverage'
}
))
if len(self.deeptools_plotFingerprintOutQualityMetrics) > 0:
self.add_section(name="Fingerprint quality metrics",
anchor="plotFingerprint",
description="Various quality metrics returned by plotFingerprint",
plot=linegraph.plot(
self.deeptools_plotFingerprintOutQualityMetrics,
{
'id': 'plotFingerprint_quality_metrics',
'title': 'deepTools: Fingerprint quality metrics',
'stacking': None,
'ymin': 0,
'ymax': 100,
'yLabelFormat': '{value}%',
'ylab': 'Percentage of fragments',
'categories': True,
'tt_label': '<strong>{point.x}</strong>: {point.y:.2f}%'
}
))
return len(self.deeptools_plotFingerprintOutQualityMetrics), len(self.deeptools_plotFingerprintOutRawCounts) | 0.004949 |
def rvs(self, size=1):
""" Rejection samples the parameter space.
"""
# create output FieldArray
out = record.FieldArray(size, dtype=[(arg, float)
for arg in self.variable_args])
# loop until enough samples accepted
n = 0
while n < size:
# draw samples
samples = {}
for dist in self.distributions:
draw = dist.rvs(1)
for param in dist.params:
samples[param] = draw[param][0]
vals = numpy.array([samples[arg] for arg in self.variable_args])
# determine if all parameter values are in prior space
# if they are then add to output
if self(**dict(zip(self.variable_args, vals))) > -numpy.inf:
out[n] = vals
n += 1
return out | 0.003359 |
def update_qa(quietly=False):
"""
Merge code from develop to qa
"""
switch('dev')
switch('qa')
local('git merge --no-edit develop')
local('git push')
if not quietly:
print(red('PLEASE DEPLOY CODE: fab deploy:all')) | 0.003876 |
def issubset(self, other):
"""Test whether the resources available in this machine description are
a (non-strict) subset of those available in another machine.
.. note::
This test being False does not imply that the this machine is
a superset of the other machine; machines may have disjoint
resources.
"""
return (set(self).issubset(set(other)) and
set(self.iter_links()).issubset(set(other.iter_links())) and
all(set(self[chip]).issubset(other[chip]) and
all(self[chip][r] <= other[chip][r]
for r in self[chip])
for chip in self)) | 0.002833 |
def reload(self):
'''
Clear plugin manager state and reload plugins.
This method will make use of :meth:`clear` and :meth:`load_plugin`,
so all internal state will be cleared, and all plugins defined in
:data:`self.app.config['plugin_modules']` will be loaded.
'''
self.clear()
for plugin in self.app.config.get('plugin_modules', ()):
self.load_plugin(plugin) | 0.004587 |
def SvelteComponent(name, path):
"""Display svelte components in iPython.
Args:
name: name of svelte component (must match component filename when built)
path: path to compile svelte .js file or source svelte .html file.
(If html file, we try to call svelte and build the file.)
Returns:
A function mapping data to a rendered svelte component in ipython.
"""
if path[-3:] == ".js":
js_path = path
elif path[-5:] == ".html":
print("Trying to build svelte component from html...")
js_path = build_svelte(path)
js_content = read(js_path, mode='r')
def inner(data):
id_str = js_id(name)
html = _template \
.replace("$js", js_content) \
.replace("$name", name) \
.replace("$data", json.dumps(data)) \
.replace("$id", id_str)
_display_html(html)
return inner | 0.009456 |
def geo_length(arg, use_spheroid=None):
"""
Compute length of a geo spatial data
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
length : double scalar
"""
op = ops.GeoLength(arg, use_spheroid)
return op.to_expr() | 0.003236 |
def _wordAfterCursor(self):
"""Get word, which is located before cursor
"""
cursor = self._qpart.textCursor()
textAfterCursor = cursor.block().text()[cursor.positionInBlock():]
match = _wordAtStartRegExp.search(textAfterCursor)
if match:
return match.group(0)
else:
return '' | 0.005634 |
def _handle_hr(self):
"""Handle a wiki-style horizontal rule (``----``) in the string."""
length = 4
self._head += 3
while self._read(1) == "-":
length += 1
self._head += 1
self._emit(tokens.TagOpenOpen(wiki_markup="-" * length))
self._emit_text("hr")
self._emit(tokens.TagCloseSelfclose()) | 0.005405 |
def selectOptimalChunk(self, peer):
"""
select an optimal chunk to send to a peer.
@return: int(chunkNumber), str(chunkData) if there is data to be sent,
otherwise None, None
"""
# stuff I have
have = sets.Set(self.mask.positions(1))
# stuff that this peer wants
want = sets.Set(self.peers[peer].mask.positions(0))
exchangeable = have.intersection(want)
finalSet = dict.fromkeys(exchangeable, 0)
# taking a page from bittorrent, rarest-first
for chunkNumber in exchangeable:
for otherPeer in self.peers.itervalues():
finalSet[chunkNumber] += not otherPeer.mask[chunkNumber]
rarityList = [(rarity, random.random(), chunkNumber)
for (chunkNumber, rarity)
in finalSet.iteritems()]
if not rarityList:
return None, None
rarityList.sort()
chunkNumber = rarityList[-1][-1] # sorted in ascending order of rarity
# sanity check
assert self.mask[chunkNumber], "I wanted to send a chunk I didn't have"
self.file.seek(chunkNumber * CHUNK_SIZE)
chunkData = self.file.read(CHUNK_SIZE)
self.sha1sums[chunkNumber] = sha.new(chunkData).digest()
return chunkNumber, chunkData | 0.002256 |