content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
from localstack.services.es import es_api
def start_elasticsearch_service(port=None, asynchronous=False):
"""
    Starts the ElasticSearch management API (not the actual Elasticsearch process).
"""
port = port or config.PORT_ES
return start_local_api("ES", port, api="es", method=es_api.serve, asynchronous=asynchronous) | 0af83d283735ad1bfdd0684bf1bc1ff36e42d727 | 16,975 |
def regexp_ilike(expr, pattern):
"""
---------------------------------------------------------------------------
Returns true if the string contains a match for the regular expression.
Parameters
----------
expr: object
Expression.
pattern: object
A string containing the regular expression to match against the string.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql("REGEXP_ILIKE({}, {})".format(expr, pattern)) | fe8e7c9a38b5379d265651d60d80bd8804219842 | 16,976 |
def make_tree(path):
"""Higher level function to be used with cache."""
return _make_tree(path) | 26919144c49f238c78a29ff4c2ce91d5da939484 | 16,978 |
def cmd(f):
"""Decorator to declare class method as a command"""
f.__command__ = True
return f | 3bdc82f0c83b0a4c0a0dd6a9629e7e2af489f0ae | 16,979 |
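A brief usage sketch (the `MyShell` class and the discovery loop are hypothetical, not part of the snippet): the decorator only tags the function, so a caller is expected to scan for the `__command__` attribute.

class MyShell:
    @cmd
    def greet(self):
        """Say hello."""
        return "hello"
    def helper(self):
        return "not a command"

# Hypothetical discovery loop: collect only the methods tagged by @cmd.
commands = {name: attr for name, attr in vars(MyShell).items()
            if callable(attr) and getattr(attr, "__command__", False)}
print(sorted(commands))  # ['greet']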
def small_prior():
"""Give string format of small uniform distribution prior"""
return "uniform(0, 10)" | fb636b564b238e22262b906a8e0626a5dff305d1 | 16,980 |
from typing import List
from typing import Dict
from collections import OrderedDict
def retrieve_panelist_ranks(panelist_id: int,
database_connection: mysql.connector.connect
) -> List[Dict]:
"""Retrieve a list of show dates and the panelist rank for the
requested panelist ID"""
cursor = database_connection.cursor()
query = ("SELECT s.showid, s.showdate, pm.showpnlrank "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistid = %s "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"ORDER BY s.showdate ASC;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchall()
cursor.close()
if not result:
return None
ranks = []
for row in result:
info = OrderedDict()
info["show_id"] = row[0]
info["show_date"] = row[1].isoformat()
info["rank"] = row[2]
ranks.append(info)
return ranks | 0560a4f0d2c11f9dbd56d25c63d70fc29ed4292d | 16,981 |
def maxPixel(rpl):
"""maxPixel(rpl)
Computes the max pixel spectrum for the specified ripple/raw spectrum object."""
xs = epq.ExtremumSpectrum()
for r in xrange(0, rpl.getRows()):
dt2.StdOut.append(".")
if dt2.terminated:
break
for c in xrange(0, rpl.getColumns()):
rpl.setPosition(r, c)
xs.include(rpl)
return xs | 9cb40df8a02e7c861aebedb7d2e13c3fac04d024 | 16,982 |
def skip_device(name):
""" Decorator to mark a test to only run on certain devices
Takes single device name or list of names as argument
"""
def decorator(function):
name_list = name if type(name) == list else [name]
function.__dict__['skip_device'] = name_list
return function
return decorator | 1bacdce5396ada5e2ba7a8ca70a8dfb273016323 | 16,983 |
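An illustrative sketch of how the decorator is applied (the test functions and the runner check are assumptions; the snippet itself only attaches metadata).

@skip_device("raspberry-pi")
def test_gpu_kernel():
    pass

@skip_device(["android", "ios"])
def test_desktop_only():
    pass

# A hypothetical test runner would inspect the attached attribute:
print(test_gpu_kernel.skip_device)    # ['raspberry-pi']
print(test_desktop_only.skip_device)  # ['android', 'ios']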
def conv1d(inputs,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for 1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, no bias will
be applied.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
if context.in_eager_mode():
raise ValueError(
        'Functional layers are currently not compatible with eager execution. '
'Use tf.layers.Conv1D instead.')
layer = Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs) | f3e9dc40d7da6a9bc7a55ec8b13c91a4ac8ba2c3 | 16,984 |
from typing import List
from typing import Union
def check_constraints(
df: pd.DataFrame, schema: dict
) -> List[Union[ConstraintError, ConstraintTypeError]]:
"""
Check table field constraints.
Arguments:
df: Table.
schema: Table schema (https://specs.frictionlessdata.io/table-schema).
Returns:
A list of errors.
"""
errors = []
for field in schema.get("fields", []):
constraints = field.get("constraints", {})
result = check_field_constraints(df[field["name"]], **constraints, field=field)
if result:
errors += result
return errors | bd6569932b2eb6e4510b7a5e8e6b22e92ddaa1e5 | 16,986 |
from typing import Iterable
from typing import Tuple
from typing import Optional
from typing import Mapping
from typing import Any
import textwrap
def consolidate_fully(
inputs: Iterable[Tuple[core.Key, xarray.Dataset]],
*,
merge_kwargs: Optional[Mapping[str, Any]] = None,
combine_kwargs: Optional[Mapping[str, Any]] = None,
) -> Tuple[core.Key, xarray.Dataset]:
"""Consolidate chunks via merge/concat into a single (Key, Dataset) pair."""
concatenated_chunks = []
combined_offsets = {}
combined_vars = set()
for key, chunk in consolidate_chunks(inputs, combine_kwargs):
# We expect all chunks to be fully combined in all dimensions and all chunks
# to have the same offset (in each dimension). The chunks from
# consolidate_chunks() should already have this property but we explicitly
# check it here again in case consolidate_chunks changes.
for dim, offset in key.offsets.items():
if dim in combined_offsets and combined_offsets[dim] != offset:
raise ValueError('consolidating chunks fully failed because '
f'chunk\n{chunk}\n has offsets {key.offsets} '
f'that differ from {combined_offsets}')
combined_offsets[dim] = offset
concatenated_chunks.append(chunk)
combined_vars.update(chunk.keys())
# Merge variables, but unlike consolidate_variables, we merge all chunks and
# not just chunks per unique key.
kwargs = dict(
compat='equals',
join='exact',
combine_attrs='override',
)
if merge_kwargs is not None:
kwargs.update(merge_kwargs)
try:
dataset = xarray.merge(concatenated_chunks, **kwargs)
except (ValueError, xarray.MergeError) as original_error:
repr_string = '\n'.join(repr(ds) for ds in concatenated_chunks[:2])
if len(concatenated_chunks) > 2:
repr_string += '\n...'
repr_string = textwrap.indent(repr_string, prefix=' ')
raise ValueError(
f'merging dataset chunks with variables {combined_vars} failed.\n'
+ repr_string
) from original_error
return core.Key(combined_offsets, combined_vars), dataset | 240f2579f97ed1b2eaef2d4d7e9e35ed17dbacdf | 16,987 |
from typing import Iterator
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
def first(items: Iterator[T]) -> Optional[T]:
"""Return the first item of the iterator."""
return next(items, None) | 5571c8d1541ce2cb3f49da736f92e17fe6326e6d | 16,988 |
from typing import Sequence
def plot_precision_recall_curve(
precisions: Sequence[float], recalls: Sequence[float],
title: str = 'Precision/Recall curve'
) -> matplotlib.figure.Figure:
"""
Plots the precision recall curve given lists of (ordered) precision
and recall values.
Args:
precisions: list of float, precision for corresponding recall values,
should have same length as *recalls*.
recalls: list of float, recall for corresponding precision values,
should have same length as *precisions*.
title: str, plot title
Returns: matplotlib.figure.Figure, reference to the figure
"""
assert len(precisions) == len(recalls)
fig, ax = plt.subplots(1, 1, tight_layout=True)
ax.step(recalls, precisions, color='b', alpha=0.2, where='post')
ax.fill_between(recalls, precisions, alpha=0.2, color='b', step='post')
    ax.set(xlabel='Recall', ylabel='Precision', title=title)
    ax.set(xlim=(0.0, 1.05), ylim=(0.0, 1.05))
return fig | d71220b71dfe26aae949676105ba643e249f1c69 | 16,989 |
import json
def Serialize(obj):
"""Return a binary serialized version of object.
Depending on the serialization method, some complex objects or input
formats may not be serializable.
UTF-8 strings (by themselves or in other structures e.g. lists) are always
supported.
Args:
obj: any object
Returns:
str, possibly containing ascii values >127
Raises:
    SerializeError: if an error occurred during serialization
"""
try:
return json.dumps(obj)
except TypeError as e:
raise SerializeError(e) | d9632f0104c69bfb38396f47f5813fd9a87d6361 | 16,990 |
from typing import Union
def getDragObject(parent: QWidget, item: Union['SourceListWidgetItem', 'DestTreeWidgetItem']) -> QDrag:
"""Instantiate QDrag of type application/draggerItem with corresponding QMimeData
Parameters
----------
parent: QWidget
item: Union['SourceListWidgetItem', 'DestTreeWidgetItem']
Returns
-------
QDrag
QDrag object holding item value as QMimeData
"""
# construct dataStream with item value
itemData = QByteArray()
getData(itemData, item.value)
mimeData = QMimeData()
mimeData.setData(LISTBOX_W_VALUE_MIMETYPE, itemData)
drag = QDrag(parent)
drag.setHotSpot(QPoint(0, 0))
drag.setMimeData(mimeData)
return drag | a6990e9f1a95632d25e15d993d0117cdb911cf7b | 16,991 |
import requests
from bs4 import BeautifulSoup
def get_soup(url):
"""
Makes a request to the given url and returns a BeautifulSoup instance of Soup
"""
res = requests.get(url)
if not res.content:
return None
soup = BeautifulSoup(res.content, "lxml")
return soup | bc4e79f4e2313e3c3edc6f6f123b6d13f71c0075 | 16,992 |
def _grow_segment(segment, addition):
"""Combine two segments into one, if possible."""
if _eq(segment[-1], addition[0]): # append addition
return segment + addition[1:]
elif _eq(segment[-1], addition[-1]): # append reversed addition
return segment + list(reversed(addition[:-1]))
elif _eq(segment[0], addition[-1]): # prepend addition
return addition[:-1] + segment
elif _eq(segment[0], addition[0]): # prepend reversed addition
return list(reversed(addition[1:])) + segment
else:
raise ValueError("addition doesn't fit segment") | 12f48ec2efbd74ac09f9277f2769a4b35030a425 | 16,993 |
def dsmoothlist_by_deform_exp(deform_exp, ag_mode):
"""
Automatically extract the selected artificial generations for training and validation set:
'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
'SingleOnly': ['single_frequency'],
'MixedOnly': ['mixed_frequency'],
'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
please note that for validation set we do not need to select all of them
:param deform_exp:
:param ag_mode: artificial generation mode: 'Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization'
:return:
"""
if ag_mode not in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']:
raise ValueError("exp_mode should be in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']")
dsmoothlist_training = []
dsmoothlist_validation = []
deform_exp_setting = load_deform_exp_setting(deform_exp)
all_deform_methods = deform_exp_setting['DeformMethods']
comp_dict = {'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
'SingleOnly': ['single_frequency'],
'MixedOnly': ['mixed_frequency'],
'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
'Visualization': []
}
for i, deform_method in enumerate(all_deform_methods):
if deform_method in comp_dict[ag_mode]:
dsmoothlist_training.append(i)
if deform_exp in ['3D_max7_D14_K', '3D_max15_D14_K', '3D_max20_D14_K', '3D_max15_SingleFrequency_Visualization']:
if ag_mode == 'Resp':
dsmoothlist_validation = [0, 5, 10]
elif ag_mode == 'NoResp':
dsmoothlist_validation = [5, 8, 10]
elif ag_mode == 'SingleResp':
dsmoothlist_validation = [4, 8, 10]
elif ag_mode == 'SingleOnly':
dsmoothlist_validation = [5, 6, 8]
elif ag_mode == 'MixedOnly':
dsmoothlist_validation = [9, 10, 12]
else:
raise ValueError('dsmoothlist_validation not found for deform_exp='+deform_exp+', please add it manually')
return dsmoothlist_training, dsmoothlist_validation | 965ecf7373c313dccd290fb8a7c6c2075645a16a | 16,994 |
from typing import Callable
from typing import BinaryIO
from typing import Tuple
def get_data_reader(header: Header) -> Callable[[BinaryIO], Tuple]:
"""Make a binary reader function for data."""
names = get_data_names(header)
format_ = ""
for name in names:
if "CH" in name:
format_ += "h"
elif "Pulse" in name:
format_ += "L"
elif "Logic" in name:
format_ += "H"
elif "Alarm" in name:
format_ += "H"
elif "AlOut" in name:
format_ += "H"
elif "Status" in name:
format_ += "H"
else:
raise ValueError(name)
struct = Struct(BIG_ENDIAN + format_)
def reader(f: BinaryIO) -> Tuple:
return struct.unpack(f.read(struct.size))
return reader | c243d5d50ec8738f8f8673fd9bf40b9d26cad69b | 16,995 |
def test_api_mediawiki(monkeypatch):
"""The api_mediawiki test using mocks."""
result = "OpenClassrooms est une école en ligne..."
def mock_summary(*args, **kwargs):
return result
monkeypatch.setattr(
MediawikiApi, 'search', mock_summary)
wikipedia = MediawikiApi()
assert wikipedia.search('openclassrooms') == result | 28b22d4acf195dee3d1e7f10688610fea71fea3f | 16,996 |
from typing import Callable
def check_fnr(fnr: str, d_numbers=True, h_numbers=False, logger: Callable = lambda _x: None) -> bool:
"""
Check if a number is a valid fødselsnumber.
Args:
fnr: A string containing the fodselsnummer to check
        h_numbers: True if h-numbers should be accepted (defaults to False)
        d_numbers: True if d-numbers should be accepted (defaults to True)
logger: A function used to log things
Returns:
True if it is a valid fodselsnummer, False otherwise.
"""
try:
return validate_fnr(fnr=fnr, d_numbers=d_numbers, h_numbers=h_numbers)
except ValueError as e:
logger(str(e))
return False | 2d5af194f1a69a093c6bf69b2cd42537d2dd32b0 | 16,997 |
import unittest
def get_case_list_from_cls(test_cls_list):
"""
    Convert the test classes into test cases.
:return:
"""
test_list = []
for test_cls in test_cls_list:
test_cases = unittest.TestLoader().loadTestsFromTestCase(test_cls)
test_list.append(test_cases)
return test_list | 3f7ed0c7ed0b9110a9cb11579087712321ec868e | 16,998 |
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
def align_times(sync_behavioral, sync_neural, score_thresh=0.9999,
ignore_poor_alignment=False, return_model=False, verbose=False):
"""Align times across different recording systems.
Parameters
----------
sync_behavioral : 1d array
Sync pulse times from behavioral computer.
sync_neural : 1d array
Sync pulse times from neural computer.
score_thresh : float, optional, default: 0.9999
R^2 threshold value to check that the fit model is better than.
ignore_poor_alignment : bool, optional, default: False
Whether to ignore a bad alignment score.
return_model : bool, optional, default: False
        Whether to return the model object. If False, returns the model intercept, coefficient, and score.
verbose : bool, optional, default: False
Whether to print out model information.
Returns
-------
model : LinearRegression
The fit model object. Only returned if `return_model` is True.
model_intercept : float
Intercept of the model predicting differences between sync pulses.
Returned if `return_model` is False.
model_coef : float
Learned coefficient of the model predicting differences between sync pulses.
Returned if `return_model` is False.
score : float
R^2 score of the model, indicating how good a fit there is between sync pulses.
"""
# sklearn imports are weird, so re-import here
# the sub-modules here aren't available from the global namespace
# Reshape to column arrays for scikit-learn
sync_behavioral = sync_behavioral.reshape(-1, 1)
sync_neural = sync_neural.reshape(-1, 1)
# Linear model to predict alignment between time traces
x_train, x_test, y_train, y_test = train_test_split(\
sync_behavioral, sync_neural, test_size=0.50, random_state=42)
model = LinearRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
score = r2_score(y_test, y_pred)
bad_score_msg = 'This session has bad synchronization between brain and behavior'
if score < score_thresh:
if not ignore_poor_alignment:
raise ValueError(bad_score_msg)
else:
print(bad_score_msg)
if verbose:
print('coef', model.coef_[0], '\n intercept', model.intercept_[0])
print('score', score)
if return_model:
return model, score
else:
return model.intercept_[0], model.coef_[0][0], score | 8bc8ad2a92267a0c1c5e8a4c6a71494910df8b7f | 16,999 |
import hashlib
def check_media(url):
"""Check if something is available or has a new hash
    Checks if url is available; if yes, download and hash it, then see if it has changed.
Args:
url: A complete url to something
Returns:
0 if available and no change.
1 if not available.
2 if it has changed
"""
    media = http.download_something(url)
# If failed to download
if not media:
return 1
# Hash media
hashed_media = hashlib.sha512(media).hexdigest() | f7237207a7ff6e555533cebe4dc83fa77538886c | 17,000 |
from typing import Iterable
from typing import Any
from typing import Tuple
def tuple_from_iterable(val: Iterable[Any]) -> Tuple[Any, ...]:
"""Builds a tuple from an iterable.
Workaround for https://github.com/python-attrs/attrs/issues/519
"""
return tuple(val) | 7880b1395f14aa690f967b9548456105b544d337 | 17,002 |
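A possible use as an attrs converter, in the spirit of the linked issue (the `Config` class here is illustrative):

from typing import Tuple
import attr

@attr.s(auto_attribs=True)
class Config:
    # Accepts any iterable (list, generator, ...) but stores an immutable tuple.
    ports: Tuple[int, ...] = attr.ib(converter=tuple_from_iterable, default=())

cfg = Config(ports=[8080, 8443])
print(cfg.ports)  # (8080, 8443)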
from collections import Counter
def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return Vocab(merged, specials=['<pad>','<unk>','<sep>','<sos>','<eos>'], vectors = 'fasttext.en.300d') | db83e858c1a8910b382bcd485923ef6ba9a1466e | 17,003 |
def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : sparse matrix of shape (n_samples, n_labels)
Input data. It should be of CSR format.
axis : {0, 1}, default=None
The axis on which the data is aggregated.
sample_weight : array-like of shape (n_samples,), default=None
Weight for each row of X.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != 'csr':
raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))
# We rely here on the fact that np.diff(Y.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over Y.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
# astype here is for consistency with axis=0 dtype
return out.astype('intp')
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return np.bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return np.bincount(X.indices, minlength=X.shape[1],
weights=weights)
else:
raise ValueError('Unsupported axis: {0}'.format(axis)) | e6754cca480d626dd7ba2c96426e5eebf17a1fcb | 17,005 |
def isolate_shape_axis(base, target, axis_list = ['X','Y','Z']):
"""
Given a base mesh, only take axis movement on the target that is specified in axis_list.
Args:
base (str): The base mesh that has no targets applied.
target (str): The target mesh vertices moved to a different position than the base.
        axis_list (list): The axes of movement allowed. If axis_list = ['X'], only vertex movement on x will be present in the result.
Returns:
str: A new mesh with verts moving only on the isolated axis.
"""
verts = cmds.ls('%s.vtx[*]' % target, flatten = True)
if not verts:
return
vert_count = len(verts)
axis_name = '_'.join(axis_list)
new_target = cmds.duplicate(target, n = '%s_%s' % (target, axis_name))[0]
for inc in range(0, vert_count):
base_pos = cmds.xform('%s.vtx[%s]' % (base, inc), q = True, t = True, ws = True)
target_pos = cmds.xform('%s.vtx[%s]' % (target, inc), q = True, t = True, ws = True)
if (base_pos == target_pos):
continue
small_x = False
small_y = False
small_z = False
if abs(base_pos[0]-target_pos[0]) < 0.0001:
small_x = True
if abs(base_pos[1]-target_pos[1]) < 0.0001:
small_y = True
if abs(base_pos[2]-target_pos[2]) < 0.0001:
small_z = True
if small_x and small_y and small_z:
continue
if not 'X' in axis_list:
target_pos[0] = base_pos[0]
if not 'Y' in axis_list:
target_pos[1] = base_pos[1]
if not 'Z' in axis_list:
target_pos[2] = base_pos[2]
cmds.xform('%s.vtx[%s]' % (new_target, inc), ws = True, t = target_pos)
return new_target | a097442c2c379338890e5571d0e3516553fe70f3 | 17,006 |
from typing import List
def _ge(t1: 'Tensor', t2: 'Tensor', isnew: bool) -> 'Tensor':
"""
Also see
--------
:param t1:
:param t2:
:param isnew:
:return:
"""
data = t1.data >= t2.data
requires_grad = t1.requires_grad or t2.requires_grad
depends_on: List[Dependency] = []
if t1.requires_grad:
def grad_fn1(grad: 'np.ndarray') -> 'np.ndarray':
            # Manually set the gradient of this discontinuous function to zero.
return np.zeros_like(t1.data)
depends_on.append(Dependency(t1, grad_fn1))
if t2.requires_grad:
def grad_fn2(grad: 'np.ndarray') -> 'np.ndarray':
return np.zeros_like(t2.data)
depends_on.append(Dependency(t2, grad_fn2))
if isnew:
requires_grad = False
depends_on: List[Dependency] = []
return Tensor(data,
requires_grad,
depends_on) | 02b0407c3b2bc3ed6bf65555ab62257e3f041d0e | 17,007 |
from typing import Dict
from typing import Any
import logging
import numpy
def convert_homogeneous_graph(graph: Dict[str, Any],
num_graphs: int,
output_dir: str):
"""Process a homogeneous graph."""
# NOTE(blais): We could in theory stash the data in the same format as their
# heterogeneous graphs in Python and just use convert_heterogeneous_graph().
# Gather node features.
logging.info("Processing node features")
num_nodes = graph.pop("num_nodes")
graph["node_#id"] = numpy.arange(num_nodes).astype(bytes)
node_features = extract_features(graph, "node", num_nodes)
filename = write_table(output_dir, "nodes", node_features, num_nodes)
node_features_dict = {}
node_features_dict["nodes"] = (filename, node_features)
# Gather edge features.
logging.info("Processing edge features")
indices = graph.pop("edge_index")
assert len(indices.shape) == 2
num_edges = indices.shape[1]
graph["edge_{}".format(tfgnn.SOURCE_NAME)] = indices[0].astype(bytes)
graph["edge_{}".format(tfgnn.TARGET_NAME)] = indices[1].astype(bytes)
# NOTE(blais): If external edge features are needed and each edge is
# unique, you can use this:
# graph["edge_#id"] = ["{}_{}".format(edge_index[0, i], edge_index[1, i])
# for i in range(num_edges)]
edge_features = extract_features(graph, "edge", num_edges)
filename = write_table(output_dir, "edges", edge_features, num_edges)
edge_features_dict = {}
edge_features_dict["edges"] = (filename, "nodes", "nodes", edge_features)
# Gather context features.
logging.info("Processing graph context features")
if num_graphs > 1:
graph_features = extract_features(graph, "graph", num_graphs)
filename = write_table(output_dir, "graph", graph_features, num_graphs)
context_features = (filename, graph_features)
else:
context_features = None
# Make sure we processed everything.
graph = remove_empty_dicts(graph)
if graph:
logging.error("Graph is not empty: %s", graph)
# Produce a corresponding graph schema.
logging.info("Producing graph schema")
return create_schema(context_features, node_features_dict, edge_features_dict) | 4aa0751437861af58159228c018a3f6d94b8613a | 17,008 |
def second_deriv_log_pdf(phi, alpha, beta, eps=1e-4):
"""Second derivative of `log_pdf` with respect to latitude."""
return (
log_pdf(phi + eps, alpha, beta)
- 2 * log_pdf(phi, alpha, beta)
+ log_pdf(phi - eps, alpha, beta)
) / eps ** 2 | 5df140d62466481997a472e260241961e872cbe3 | 17,009 |
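A quick sanity check of the central finite difference, using a hypothetical quadratic `log_pdf` whose second derivative is known analytically (2 * alpha); `beta` is unused in this stand-in.

def log_pdf(phi, alpha, beta):
    # Hypothetical stand-in: a quadratic in phi.
    return alpha * phi ** 2

print(second_deriv_log_pdf(0.3, alpha=1.5, beta=0.0))  # ~3.0 (exact value is 2 * 1.5)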
from collections import OrderedDict
def normalize_data(data, zp=25., zpsys='ab'):
"""Return a copy of the data with all flux and fluxerr values normalized
to the given zeropoint. Assumes data has already been standardized.
Parameters
----------
data : `~numpy.ndarray`
Structured array.
zp : float
zpsys : str
Returns
-------
normalized_data : `~numpy.ndarray`
"""
warn_once('standardize_data', '1.5', '2.0',
'This function not intended for public use; open an issue at '
'https://github.com/sncosmo/sncosmo/issues if you need this '
'functionality.')
normmagsys = get_magsystem(zpsys)
    factor = np.empty(len(data), dtype=float)
for b in set(data['band'].tolist()):
idx = data['band'] == b
b = get_bandpass(b)
bandfactor = 10.**(0.4 * (zp - data['zp'][idx]))
bandzpsys = data['zpsys'][idx]
for ms in set(bandzpsys):
idx2 = bandzpsys == ms
ms = get_magsystem(ms)
bandfactor[idx2] *= (ms.zpbandflux(b) / normmagsys.zpbandflux(b))
factor[idx] = bandfactor
normalized_data = OrderedDict([('time', data['time']),
('band', data['band']),
('flux', data['flux'] * factor),
('fluxerr', data['fluxerr'] * factor),
('zp', zp),
('zpsys', zpsys)])
return dict_to_array(normalized_data) | 9aa3c4faf6f9a9f98afd9e11d2bafaf1b026519c | 17,011 |
import base64
from email.mime.text import MIMEText
def CreateMessage(sender, to, subject, message_text):
"""
Creates an object containing a base64url encoded email object.
"""
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
raw_message = base64.urlsafe_b64encode(message.as_bytes())
raw_message = raw_message.decode()
return {'raw': raw_message } | 8d55b64ebf4801781126f244441f619201d51190 | 17,012 |
def bg_lookup(bg_name: str) -> str:
"""Look up ANSI escape codes based on background color name.
:param bg_name: background color name to look up ANSI escape code(s) for
:return: ANSI escape code(s) associated with this color
:raises ValueError if the color cannot be found
"""
try:
ansi_escape = BG_COLORS[bg_name.lower()]
except KeyError:
raise ValueError('Background color {!r} does not exist.'.format(bg_name))
return ansi_escape | 8c520f599bc41ce847e5c602ddf8500fe366f24d | 17,013 |
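A minimal sketch of the lookup table this function expects (`BG_COLORS` and the escape codes shown are illustrative, not the module's actual values):

BG_COLORS = {
    "black": "\x1b[40m",
    "red": "\x1b[41m",
    "green": "\x1b[42m",
}

print(repr(bg_lookup("Red")))  # '\x1b[41m' (the name is lowercased before lookup)
bg_lookup("magenta")           # raises ValueError: Background color 'magenta' does not exist.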
def readData(f):
"""
Parse taxon count table (from count-taxon.py)
Parameters:
-----------
f : str
file name of taxon count table
Returns:
--------
tuple
a list of taxons and a list of their counts
"""
taxa_lis = []
num_lis = []
for n, line in enumerate(open(f)):
if line.startswith('#'):
continue
line = line.rstrip()
if line == '':
continue
taxa, num = line.split('\t')
skip = False
for word in EXCLUDE:
if word in taxa:
skip = True
break
if skip:
continue
taxa = taxa.rstrip(';')
lis = taxa.split(';')
lis2 = []
for item in lis:
item = item.strip()
if item.endswith(')'):
item = item.split('(')[0].strip()
# remove taxon level prefix, e.g. 'p__Firmicutes'
if '__' in item:
item = item.split('__', 1)[1]
#item = item.strip('"')
item = item.lower()
if 'unclassified' in item:
item = 'Unclassifed'
elif 'unknown' in item:
item = 'Unclassifed'
elif 'other' in item:
item = 'Unclassifed'
elif 'unassigned' in item:
item = 'Unclassifed'
item = item.capitalize()
lis2.append(item)
taxa_lis.append(lis2)
num_lis.append(float(num))
return taxa_lis, num_lis | fcd10e1d7dc1db0b871c7a4802f012eec43c08a9 | 17,014 |
def detect_onsets_offsets(data, threshold, min_distance):
"""
detects when a when a signal jumps above zero, and when it goes back to zero
"""
on = (data > threshold) # when the data is greater than zero
left_on = np.concatenate(([0], on), axis=0)[0:-1]
onset = np.squeeze(np.where(on & (left_on != True)))
offset = np.squeeze(np.where((on != True) & (left_on == True)))
if data[-1] > threshold:
offset = np.append(offset, len(data)) # make sure there is an offset at some point...
if len(np.shape(onset)) < 1:
offset = [offset]
onset = [onset]
new_offset = []
new_onset = []
if len(onset) > 0:
new_onset.append(onset[0])
if len(onset) > 1:
for i in range(len(onset)-1):
if (onset[i+1] - offset[i]) > min_distance:
new_onset.append(onset[i+1])
new_offset.append(offset[i])
new_offset.append(offset[-1])
return new_onset, new_offset | faa81445828b72bc7d7433a4c2c8740bb36050bb | 17,016 |
def STEPConstruct_PointHasher_IsEqual(*args):
"""
    * Returns True when the two keys are the same. Two equal keys must have the same hash code; the converse is not necessarily true.
:param Point1:
:type Point1: gp_Pnt
:param Point2:
:type Point2: gp_Pnt
:rtype: bool
"""
return _STEPConstruct.STEPConstruct_PointHasher_IsEqual(*args) | b3aa095d723203b05ea29ec4f5b34a70bc4c5276 | 17,017 |
def step_smooth(x) :
""" Smooth polinomial rising step from 0(x=0) to 1(x=1)
"""
return np.select([x>1, x>0], [1, 3*np.square(x)-2*np.power(x,3)], default=0) | ccf53e2561e256d2114510598ebb7a2ec1ce7cbd | 17,018 |
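A quick numerical check of the 3x^2 - 2x^3 smoothstep, showing the clamping outside [0, 1]:

import numpy as np

x = np.array([-0.5, 0.0, 0.25, 0.5, 0.75, 1.0, 2.0])
print(step_smooth(x))  # [0.  0.  0.15625  0.5  0.84375  1.  1.]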
def getBitSizeOfVarInt64(value):
"""
Gets bit size of variable 64-bit signed integer value.
:param value: Value to use for bit size calculation.
:returns: Bit size of the value.
"""
return _getBitSizeOfVarIntImpl(value, VARINT64_MAX_VALUES, signed=True) | 20c52df5ec9a00680e771f206319a02e3ba3de66 | 17,019 |
import functools
import numpy as np
def nan_if_exception(func):
"""Wrap func such that np.nan is returned if func raises an exception.
KeyboardInterrupt and SystemExit are still raised.
Examples:
>>> @nan_if_exception
... def f(x, y):
... assert x + y >= 5
>>> f(1, 2)
nan
>>> def f(x, y):
... assert x + y >= 5
>>> g = nan_if_exception(f)
>>> g(1, 2)
nan
"""
@functools.wraps(func)
def wrapper_nan_if_exception(params, *args, **kwargs):
try:
out = func(params, *args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
out = np.nan
return out
return wrapper_nan_if_exception | f03c314741c47805d767fc62fbce49cda9be35fe | 17,020 |
def get_client_public_key(patient_id, study_id):
"""Grabs a user's public key file from s3."""
key_pair_paths = construct_s3_key_paths(study_id, patient_id)
key = s3_retrieve(key_pair_paths['public'], study_id, raw_path=True)
return encryption.import_RSA_key( key ) | d6e6560c49f925a8f87a84829d632f67957f3c79 | 17,021 |
from typing import List
from typing import Tuple
def plot_offset_direction(
dsaimage: Image, coords: SkyCoord, ra_offsets: List[float], dec_offsets: List[float]
) -> Tuple["matplotlib.fig", "matplotlib.axes.Axes"]:
"""Plot measured offsets on an image."""
fig, ax = dsaimage.show()
dsaimage.add_arrows(coords, ra_offsets, dec_offsets)
return fig, ax | 24416863b795538ee8c86fa1949f6d701b66f28d | 17,022 |
def sensitivity_metric(event_id_1, event_id_2):
"""Determine similarity between two epochs, given their event ids."""
if event_id_1 == 1 and event_id_2 == 1:
return 0 # Completely similar
if event_id_1 == 2 and event_id_2 == 2:
return 0.5 # Somewhat similar
elif event_id_1 == 1 and event_id_2 == 2:
return 0.5 # Somewhat similar
    elif event_id_1 == 2 and event_id_2 == 1:
return 0.5 # Somewhat similar
else:
return 1 | b04c5fa27ef655dd3f371c3ce6ef0410c55dd05b | 17,024 |
def duracion_promedio_peliculas(p1: dict, p2: dict, p3: dict, p4: dict, p5: dict) -> str:
"""Calcula la duracion promedio de las peliculas que entran por parametro.
Esto es, la duración total de todas las peliculas dividida sobre el numero de peliculas.
Retorna la duracion promedio en una cadena de formato 'HH:MM' ignorando los posibles decimales.
Parametros:
p1 (dict): Diccionario que contiene la informacion de la pelicula 1.
p2 (dict): Diccionario que contiene la informacion de la pelicula 2.
p3 (dict): Diccionario que contiene la informacion de la pelicula 3.
p4 (dict): Diccionario que contiene la informacion de la pelicula 4.
p5 (dict): Diccionario que contiene la informacion de la pelicula 5.
Retorna:
str: la duracion promedio de las peliculas en formato 'HH:MM'.
"""
# Se extraen las duraciones de las películas.
duracion1 = p1["duracion"]
duracion2 = p2["duracion"]
duracion3 = p3["duracion"]
duracion4 = p4["duracion"]
duracion5 = p5["duracion"]
    # Average of the movie durations.
promedio = (duracion1 + duracion2 + duracion3 + duracion4 + duracion5) / 5
    # Conversion to 'HH:MM' format.
horas = promedio // 60
minutos = promedio % 60
if horas < 10:
horas = '0' + str(int(horas))
else:
horas = str(int(horas))
if minutos < 10:
minutos = '0' + str(int(minutos))
else:
minutos = str(int(minutos))
return horas + ":" + minutos | a8cfcc96a43480ee6830cc212343a33148036c5d | 17,025 |
def _to_test_data(text):
"""
Lines should be of this format: <word> <normal_form> <tag>.
    Lines that start with "#" and blank lines are skipped.
"""
return [l.split(None, 2) for l in text.splitlines()
if l.strip() and not l.startswith("#")] | 8f0bae9f81d2d14b5654622f1493b23abd88424d | 17,026 |
import copy
def append(motion1, motion2):
"""
Combines two motion sequences into one. motion2 is appended to motion1.
The operation is not done in place.
Note that the operation places the sequences next to each other without
attempting to blend between the poses. To interpolate between the end of
motion1 and start of motion2, use the `append_and_blend` operation.
Args:
motion1, motion2: Motion sequences to be combined.
"""
assert isinstance(motion1, motion_class.Motion)
assert isinstance(motion2, motion_class.Motion)
assert motion1.skel.num_joints() == motion2.skel.num_joints()
combined_motion = copy.deepcopy(motion1)
combined_motion.name = f"{motion1.name}+{motion2.name}"
combined_motion.poses.extend(motion2.poses)
return combined_motion | dc51812f450a072ad283173a15fb2c07ae978e5b | 17,027 |
from datetime import datetime
from typing import List
def service(
fmt: SupportedFormats,
begints: datetime = Query(
..., description="Inclusive UTC timestamp window start for issuance."
),
endts: datetime = Query(
..., description="Exclusive UTC timestamp window end for issuance."
),
wfo: List[str] = Query(
None, description="WFO 3-letter codes for filter.", max_length=3
),
only_new: bool = Query(True, description="Only include issuance events."),
ph: List[str] = Query(
None, description="VTEC Phenomena 2-letter codes.", max_length=2
),
):
"""Replaced above."""
df = handler(begints, endts, wfo, only_new, ph)
return deliver_df(df, fmt) | eeb0a8b1187ff2386401440b6ddd812b81cd0fdd | 17,028 |
def cols_shuffled(expr_df, dist_df=None, algo="agno", seed=0):
""" Return a copy of the expr_df DataFrame with columns shuffled randomly.
:param pandas.DataFrame expr_df: the DataFrame to copy and shuffle
:param pandas.DataFrame dist_df: the distance DataFrame to inform us about distances between columns
:param str algo: Agnostic to distance ('agno') or distance aware ('dist')?
:param int seed: set numpy's random seed if desired
    :returns: A tuple of (a copy of expr_df with its columns shuffled, a dict mapping original to shuffled column labels).
"""
shuffled_df = expr_df.copy(deep=True)
np.random.seed(seed)
if algo == "agno":
shuffled_df.columns = np.random.permutation(expr_df.columns)
elif algo == "dist":
# Make a distance-similarity matrix, allowing us to characterize one well_id's distance-similarity to another.
diss = pd.DataFrame(data=np.corrcoef(dist_df.values), columns=dist_df.columns, index=dist_df.index)
# Old and new well_id indices
available_ids = list(expr_df.columns)
shuffled_well_ids = []
# For each well_id in the original list, replace it with another one as distance-similar as possible.
for well_id in list(expr_df.columns):
# Do we want to avoid same tissue-class?
# This algo allows for keeping the same well_id and doesn't even look at tissue-class.
# sort the distance-similarity by THIS well_id's column, but use corresponding index of well_ids
candidates = diss.sort_values(by=well_id, ascending=False).index
candidates = [x for x in candidates if x in available_ids]
if len(candidates) == 1:
candidate = candidates[0]
elif len(candidates) < 20:
candidate = np.random.permutation(candidates)[0]
else:
n_candidates = min(20, int(len(candidates) / 5.0))
candidate = np.random.permutation(candidates[:n_candidates])[0]
# We have our winner, save it to our new list and remove it from what's available.
shuffled_well_ids.append(candidate)
available_ids.remove(candidate)
shuffled_df.columns = shuffled_well_ids
else:
shuffled_df = pd.DataFrame()
# Column labels have been shuffled; return a dataframe with identically ordered labels and moved data.
return shuffled_df.loc[:, expr_df.columns], dict(zip(expr_df.columns, shuffled_df.columns)) | 37773c5219ecc92925c155e9d911c42ddbebc8ea | 17,029 |
from typing import Dict
from typing import Any
import tensorflow as tf
from tensorflow.keras import layers as L
def build_model(task_description: Dict[str, Any]) -> Dict[str, Any]:
"""Build the predinet model."""
# ---------------------------
# Setup and process inputs
processors = {"image": process_image, "task_id": process_task_id}
mlp_inputs = utils.factory.create_input_layers(task_description, processors)
# ---------------------------
# Concatenate processed inputs
concat_in = next(iter(mlp_inputs["processed"].values()))
if len(mlp_inputs["processed"]) > 1:
concat_in = L.Concatenate()(list(mlp_inputs["processed"].values()))
# ---------------------------
for size, activation in zip(C["mlp_hidden_sizes"], C["mlp_hidden_activations"]):
concat_in = L.Dense(size, activation=activation)(concat_in)
predictions = L.Dense(task_description["output"]["num_categories"])(concat_in)
# ---------------------------
# Create model instance
model = tf.keras.Model(
inputs=mlp_inputs["input_layers"],
outputs=predictions,
name="mlp_image_classifier",
)
# ---------------------------
# Compile model for training
dataset_type = task_description["output"]["type"]
assert (
dataset_type == "binary"
), f"MLP image classifier requires a binary classification dataset, got {dataset_type}"
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.keras.metrics.BinaryAccuracy(name="acc")
# ---------------------------
return {"model": model, "loss": loss, "metrics": metrics} | 03c7951d3fb0fddbfb1e4bad3b4e5ce54253f994 | 17,031 |
def create_unet_model(N_classes, input_shape=(None, None, 1), dropout_rate=0.24, learning_rate=1e-5):
"""
Implementation of Unet mode for multiclass semantic segmentation
:param N_classes: Number of classes of segmentation map
:param input_shape: input image shape
:param dropout_rate: dropout rate
:return: a tuple of two models, first element is model to train and second is model to save
"""
# make sure the sizes are divisible by 16
if(input_shape[0] is not None): assert 16 * (input_shape[0] // 16) == input_shape[0], 'invalid dimension 0'
if( input_shape[1] is not None): assert 16 * (input_shape[1] // 16) == input_shape[1], 'invalid dimension 1'
in_image = Input(shape=input_shape)
conv0 = Conv2D(32, (3, 3), activation='relu', name='conv1_0', padding='same')(in_image)
conv1, x = conv_block_down(32, dropout_rate=dropout_rate ) (conv0)
conv2, x = conv_block_down(64, dropout_rate=dropout_rate ) (x)
conv3, x = conv_block_down(128, dropout_rate=dropout_rate )(x)
conv4, x = conv_block_down(256, dropout_rate=dropout_rate )(x)
x = conv_block(512, dropout_rate=dropout_rate ) (x)
x = deconv_block(512, skip_layer=conv4, dropout_rate=dropout_rate ) (x)
x = deconv_block(256, skip_layer=conv3, dropout_rate=dropout_rate ) (x)
x = deconv_block(128, skip_layer=conv2, dropout_rate=dropout_rate ) (x)
x = deconv_block(64, skip_layer=conv1, dropout_rate=dropout_rate ) (x)
outp_logit = Conv2D(N_classes, (1, 1), activation='linear', padding='same', name='logit')(x)
outp_softmax = Softmax4D(axis=3, name='segmap')(outp_logit)
model_train = Model(inputs=in_image, outputs=[outp_logit,outp_softmax])
model_save = Model(inputs=in_image, outputs=[outp_softmax])
#if last channel is background
if(N_classes <=5):
class_indices = list(range(N_classes))[:-1] #except last one which is background
metrics_classwise=[]
for c in class_indices:
fc = multiclass_dice_coef_metric(from_logits=True, class_index=c)
fc.__name__='dmc'+str(c)
metrics_classwise.append(fc)
metrics = {'logit': metrics_classwise}
else:
metrics = {'logit': [multiclass_dice_coef_metric(from_logits=True)]} #all classes
model_train.compile(optimizer=Adam(lr=learning_rate),
loss={'logit': multiclass_balanced_cross_entropy(from_logits=True, P=5)},
metrics=metrics)
return Models(model_train, model_save) | 93c15376eed0c5cf1abe689ef1daca6c8877e61a | 17,032 |
from uutils.torch_uu.models.learner_from_opt_as_few_shot_paper import get_default_learner
from typing import Optional
def get_5cnn_model(image_size: int = 84,
bn_eps: float = 1e-3,
bn_momentum: float = 0.95,
n_classes: int = 5,
filter_size: int = 32,
levels: Optional = None,
spp: bool = False) -> nn.Module:
"""
Gets a 5CNN that does not change the spatial dimension [H,W] as it processes the image.
:return:
"""
mdl: nn.Module = get_default_learner(image_size, bn_eps, bn_momentum, n_classes, filter_size, levels, spp)
return mdl | 6b3e21e33433102b16b70c88dd5d033e1f069b86 | 17,034 |
def valid_extract_input_specification(instance_of_property, depth, language_code, named_entity_label):
""" Checks if the input for the extraction is valid. Both to help
the user get correct input and to sanitize it to avoid
attacks as the values are used to generate filenames.
"""
pattern_match = valid_instance_of_property_pattern.match(instance_of_property)
if instance_of_property != "manual_entry" and instance_of_property != "stopwords" and( pattern_match is None or pattern_match.span()[1] != len(instance_of_property) ):
flash(f"The value of the instance of property must start with Q and then be followed by one or more digits (e.g. Q123). Currently, it is '{instance_of_property}'.", "danger")
return False
if len(language_code) != 2 or language_code.lower() != language_code:
flash(f"The language code must consist of two lowercase letters (e.g. en). Currently, it is '{language_code}'.", "danger")
return False
pattern_match = valid_named_entity_label_pattern.match(named_entity_label)
if pattern_match is None or pattern_match.span()[1] != len(named_entity_label):
flash(f"The label must only consist of the characters a-z (upper or lowercased) or the special characters - or _ (e.g. LOC or feature_film). Currently it is '{named_entity_label}'.", "danger")
return False
try:
depth_as_int = int(depth)
if depth_as_int < 0:
flash(f"The depth must be an integer >= 0. Currently it is '{depth}'.", "danger")
return False
except:
flash(f"The depth must be an integer >= 0. Currently it is '{depth}'.", "danger")
return False
return True | c71f744fef82e54ca2fad0ea64c8637692256299 | 17,035 |
def get_company_data(mid):
"""Looks up stock ticker information for a company via its Freebase ID."""
query = MID_TO_TICKER_QUERY % mid
bindings = make_wikidata_request(query)
if not bindings:
if mid:
print("%s No company data found for MID: %s" % (WARNING, mid))
return None
# Collect the data from the response.
companies = []
for binding in bindings:
try:
name = binding["companyLabel"]["value"]
except KeyError:
name = None
try:
root = binding["rootLabel"]["value"]
except KeyError:
root = None
try:
symbol = binding["tickerLabel"]["value"]
except KeyError:
symbol = None
try:
exchange = binding["exchangeNameLabel"]["value"]
except KeyError:
exchange = None
company = {"name": name,
"symbol": symbol,
"exchange": exchange}
# Add the root if there is one.
if root and root != name:
company["root"] = root
# Add to the list unless we already have the same entry.
if company not in companies:
print("%s Adding company data: %s" % (OK, company))
companies.append(company)
else:
print("%s Skipping duplicate company data: %s" % (WARNING, company))
return companies | 464b9ef795938b2d83fd6a629b9af09ff165a922 | 17,036 |
def embed_data_into_square_lattice(data):
"""Insert MR image into square 2D array."""
dims = np.array(data.shape)
offset_x = int((dims.max() - dims[0]) / 2.)
offset_y = int((dims.max() - dims[1]) / 2.)
temp = np.zeros((dims.max(), dims.max()))
temp[offset_x:offset_x+dims[0], offset_y:offset_y+dims[1]] = data
return temp | e701e871b4df9f4085b2548ad1e10f93ce33bf38 | 17,037 |
def is_partial_link_text_selector(selector):
"""
A basic method to determine if a selector is a partial link text selector.
"""
if (
selector.startswith("partial_link=")
or selector.startswith("partial_link_text=")
or selector.startswith("partial_text=")
or selector.startswith("p_link=")
or selector.startswith("p_link_text=")
or selector.startswith("p_text=")
):
return True
return False | 4f21143173e46ed273ca719ea1aac8489afa2395 | 17,038 |
def scell(obj, dims, method=1, **kwds):
"""Build supercell based on `dims`.
Uses coords_frac and cell.
Parameters
----------
obj : Structure or Trajectory
dims : tuple (nx, ny, nz) for a N = nx * ny * nz supercell
method : int, optional
Switch between numpy-ish (1) or loop (2) implementation. (2) should
always produce correct results but is sublty slower. Only for
Structure.
**kwds : see :func:`scell_mask`
Notes
-----
The mask for the supercell is created by :func:`scell_mask` and applied to
each atom in `obj` one after another, i.e. each atom is repeated nx*ny*nz
times according to the mask pattern, independently of how the pattern looks
like (e.g. the `direc` parameter in :func:`scell_mask`). So, just as rows
in np.repeat(), we have:
| original: symbols=[A,B,C,D]
| 2 x 1 x 1: symbols=[A,A,B,B,C,C,D,D]
| nx x ny x nz: symbols=[(nx*ny*nz) x A, (nx*ny*nz) x B, ...]
Returns
-------
scell : Structure
"""
# Place each atom N = nx*ny*nz times in the supercell, i.e. copy unit cell
# N times. Actually, N-1, since ix=iy=iz=0 is the unit cell itself.
#
# Let k = {x,y,z}.
#
# mask[j,:] = [ix, iy, iz], ik = integers (floats actually, but
# mod(ik, floor(ik)) == 0.0)
#
# original cell:
# coords_frac[i,:] = position vect of atom i in the unit cell in *crystal*
# coords!!
#
# super cell:
# sc_coords_frac[i,:] = coords_frac[i,:] + [ix, iy, iz]
# for all permutations (see scell_mask()) of ix, iy, iz.
# ik = 0, ..., nk - 1
#
# sc_coords_frac : crystal coords w.r.t the *old* cell, i.e. the entries are in
# [0,(max(dims))], not [0,1], is scaled below
#
if 'direc' not in kwds:
kwds['direc'] = 1
mask = scell_mask(*tuple(dims), **kwds)
nmask = mask.shape[0]
if obj.is_struct:
sc_cell = obj.cell * np.asarray(dims)[:,None]
container = Structure
elif obj.is_traj:
# (nstep,3,3) * (1,3,1) -> (nstep, 3,3)
sc_cell = obj.cell * np.asarray(dims)[None,:,None]
container = Trajectory
else:
raise Exception("unknown input type")
if method == 1:
sc_symbols = np.array(obj.symbols).repeat(nmask).tolist() if (obj.symbols
is not None) else None
if obj.is_struct:
# (natoms, 1, 3) + (1, nmask, 3) -> (natoms, nmask, 3)
sc_coords_frac = (obj.coords_frac[:,None,:]
+ mask[None,...]).reshape(obj.natoms*nmask,3)
elif obj.is_traj:
# cool, eh?
# (nstep, natoms, 1, 3) + (1, 1, nmask, 3) -> (nstep, natoms, nmask, 3)
sc_coords_frac = (obj.coords_frac[...,None,:]
+ mask[None,None,...]).reshape(obj.nstep,obj.natoms*nmask,3)
else:
raise Exception("huh!?")
# explicit loop version for testing, this is the reference implementation,
# only for Structure
elif method == 2:
if obj.is_struct:
sc_symbols = []
sc_coords_frac = np.empty((nmask*obj.natoms, 3), dtype=float)
k = 0
for iatom in range(obj.natoms):
for j in range(nmask):
if obj.symbols is not None:
sc_symbols.append(obj.symbols[iatom])
sc_coords_frac[k,:] = obj.coords_frac[iatom,:] + mask[j,:]
k += 1
else:
raise Exception("method=2 only implemented for Structure")
else:
raise Exception("unknown method: %s" %repr(method))
sc_coords_frac[...,0] /= dims[0]
sc_coords_frac[...,1] /= dims[1]
sc_coords_frac[...,2] /= dims[2]
return container(coords_frac=sc_coords_frac,
cell=sc_cell,
symbols=sc_symbols) | e0cf7e03323c5994d0c56ba171d168aed105cfda | 17,039 |
def create_config(device: str = 'CPU', *,
per_process_gpu_memory_fraction: float = 0.0,
log_device_placement: bool = False) -> tf.ConfigProto:
"""Creates tf.ConfigProto for specifi device"""
config = tf.ConfigProto(log_device_placement=log_device_placement)
if is_gpu(device):
if per_process_gpu_memory_fraction > 0.0:
config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
else:
config.gpu_options.allow_growth = True
else:
config.device_count['GPU'] = 0
return config | 0b6f351bcad2d816d6c03896ed60223cf2bb90c9 | 17,040 |
from typing import Union
from typing import Dict
def format_childproc(cp: Union[Event, Dict]):
"""Format childproc event into single line."""
return f" @{as_configured_timezone(cp.get('event_timestamp'))}: {cp.get('childproc_cmdline')} - {cp.get('childproc_process_guid')}" | f233a0fca52fdbef8d7ed0177772c9a8d196ec0d | 17,041 |
def format_maven_jar_dep_name(group_id, artifact_id, repository = DEFAULT_REPOSITORY_NAME):
"""
group_id: str
artifact_id: str
repository: str = "maven"
"""
return "@%s//:%s" % (repository, format_maven_jar_name(group_id, artifact_id)) | a331ce788a510c09c32a1d2c7d1f8d4fbeaba975 | 17,043 |
import ctypes
import ctypes.wintypes
def PCO_GetCameraName(handle):
"""
This function retrieves the name of the camera.
"""
f = pixelfly_dll.PCO_GetCameraName
f.argtypes = (ctypes.wintypes.HANDLE, ctypes.c_char_p, ctypes.wintypes.WORD)
f.restype = ctypes.c_int
cameraName = ctypes.create_string_buffer(41)
ret_code = f(handle, cameraName, 41)
PCO_manage_error(ret_code)
return cameraName.raw.decode("ascii") | f704f2a875f29f0876553c631de032d25b5166f4 | 17,044 |
def issingleton(var):
""" If isunitset(var) is True, this function returns True,
otherwise isscalar(var) is returned.
"""
# Here we define singleton as a unit set or scalar
if isunitset(var):
return True
return isscalar(var) | cd1808ad99647486e81e0f903047db9327b77fb8 | 17,046 |
def satisfiesF(L):
"""
Assumes L is a list of strings
Assume function f is already defined for you and it maps a string to a Boolean
Mutates L such that it contains all of the strings, s, originally in L such
that f(s) returns True, and no other elements. Remaining elements in L
should be in the same order.
Returns the length of L after mutation
"""
    idx = 0
while idx < len(L):
if f(L[idx]): # do nothing if f true
idx += 1
else: # remove the element if false
L.pop(idx)
return len(L) | 429c385f51ba254fff7170f4e69725cc98c8b337 | 17,047 |
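A usage sketch with a concrete `f` (in the original exercise `f` is supplied externally, so this one is illustrative):

def f(s):
    return 'a' in s

L = ['a', 'b', 'aa', 'c']
print(satisfiesF(L))  # 2
print(L)              # ['a', 'aa']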
def calc_circle_radius(area: float) -> float:
"""
Calculate radius from area.
>>> calc_circle_radius(10.0)
1.7841241161527712
"""
assert not area < 0
radius = numpy_to_python_type(np.sqrt(area / np.pi))
assert isinstance(radius, float)
return radius | 06086e1b130bef960fad200b350cba01b647466e | 17,048 |
from tqdm import tqdm
def load_imgs(paths, target_size):
"""Load images from `paths`."""
pairs = np.empty((len(paths), 2, *target_size), dtype=np.float32)
for i, row in tqdm(paths.iterrows(), total=len(pairs)):
img1 = img_to_array(load_img(row.p1, target_size=target_size)) / 255
img2 = img_to_array(load_img(row.p2, target_size=target_size)) / 255
pair = np.stack([img1, img2], axis=0)
pairs[i, :] = pair
y = paths.target.values.astype(np.uint8)
return pairs, y | b91b86bcae29a6bf2d1227a25a2b8297c6be1734 | 17,049 |
def load_dict_from_hdf5(h5_filepath):
"""
Load h5 file as a dict
"""
def recursively_load_dict_contents_from_group(h5_obj, path):
"""
Recursively load a dict from h5 file
"""
ans = {}
for key, item in h5_obj[path].items():
if isinstance(item, h5py._hl.dataset.Dataset):
                ans[key] = item[()]
elif isinstance(item, h5py._hl.group.Group):
ans[key] = recursively_load_dict_contents_from_group(h5_obj, path + key + '/')
return ans
with h5py.File(h5_filepath, 'r') as h5_obj:
return recursively_load_dict_contents_from_group(h5_obj, '/') | 2339cc6edb83ed59fb43ec49503d86758d37d83e | 17,050 |
def interactive_grid_shape(grid, max_n=200, plotfxn=None, **kwargs):
""" Interactive ipywidgets for select the shape of a grid
Parameters
----------
grid : pygridgen.Gridgen
The base grid from which the grids of new shapes (resolutions) will be
generated.
max_n : int (default = 200)
The maximum number of possible cells in each dimension.
plotfxn : callable, optional
Function that plots the grid to provide user feedback. The call
signature of this function must accept to positional parameters for the
x- and y-arrays of node locations, and then accept any remaining keyword
arguments. If not provided, *pygridtools.viz.plot_cells* is used.
Additional Parameters
---------------------
All remaining keyword arguments are passed to *plotfxn*
Returns
-------
newgrid : pygridgen.Gridgen
The reshaped grid
widget : ipywidgets.interactive
Collection of IntSliders for changing the number cells along each axis
in the grid.
Examples
--------
>>> from pygridgen import grid
>>> from pygridtools import viz, iotools
>>> def make_fake_bathy(shape):
... j_cells, i_cells = shape
... y, x = numpy.mgrid[:j_cells, :i_cells]
... z = (y - (j_cells // 2))** 2 - x
... return z
>>> def plot_grid(x, y, ax=None):
... shape = x[1:, 1:].shape
... bathy = make_fake_bathy(shape)
... if not ax:
... fig, ax = pyplot.subplots(figsize=(8, 8))
... ax.set_aspect('equal')
... return viz.plot_cells(x, y, ax=ax, cmap='Blues', colors=bathy, lw=0.5, ec='0.3')
>>> d = numpy.array([
... (13, 16, 1.00), (18, 13, 1.00), (12, 7, 0.50),
... (10, 10, -0.25), ( 5, 10, -0.25), ( 5, 0, 1.00),
... ( 0, 0, 1.00), ( 0, 15, 0.50), ( 8, 15, -0.25),
... (11, 13, -0.25)])
>>> g = grid.Gridgen(d[:, 0], d[:, 1], d[:, 2], (75, 75), ul_idx=1, focus=None)
>>> new_grid, widget = iotools.interactive_grid_shape(g, plotfxn=plot_grid)
"""
if not plotfxn:
plotfxn = viz.plot_cells
common_opts = dict(min=2, max=max_n, continuous_update=False)
return grid, ipywidgets.interactive(
_change_shape,
g=ipywidgets.fixed(grid),
irows=ipywidgets.IntSlider(value=grid.ny, **common_opts),
jcols=ipywidgets.IntSlider(value=grid.nx, **common_opts),
plotfxn=ipywidgets.fixed(plotfxn),
plotopts=ipywidgets.fixed(kwargs)
) | ef126f39f8433a65deb22e09ea825f342a38bea1 | 17,051 |
from lgsvl.utils import transform_to_forward
from typing import Optional
def generate_initial_state(initial_pos: Transform, initial_speed: Optional[float] = None) -> AgentState:
"""
:param initial_speed: Initial speed in km/h
"""
movement = AgentState()
movement.transform = initial_pos
if initial_speed is not None:
movement.velocity = (initial_speed / 3.6) * transform_to_forward(movement.transform)
return movement | 30906410d3fe92b84f3d2c93a49db24f90a8ec8b | 17,052 |
def resolve_ami(ami=None, arch="x86_64", tags=frozenset(), tag_keys=frozenset()):
"""
Find an AMI by ID, name, or tags.
- If an ID is given, it is returned with no validation; otherwise, selects the most recent AMI from:
- All available AMIs in this account with the Owner tag equal to this user's IAM username (filtered by tags given);
- If no AMIs found, all available AMIs in this account with the AegeaVersion tag present (filtered by tags given);
- If no AMIs found, all available AMIs in this account (filtered by tags given).
Return the AMI with the most recent creation date.
"""
assert arch in {"x86_64", "arm64"}
if ami is None or not ami.startswith("ami-"):
if ami is None:
filters = dict(Owners=["self"],
Filters=[dict(Name="state", Values=["available"]), dict(Name="architecture", Values=[arch])])
else:
filters = dict(Owners=["self"], Filters=[dict(Name="name", Values=[ami])])
all_amis = resources.ec2.images.filter(**filters)
if tags:
all_amis = filter_by_tags(all_amis, **tags)
if tag_keys:
all_amis = filter_by_tag_keys(all_amis, *tag_keys)
current_user_amis = all_amis.filter(Filters=[dict(Name="tag:Owner", Values=[ARN.get_iam_username()])])
amis = sorted(current_user_amis, key=lambda x: x.creation_date)
if len(amis) == 0:
aegea_amis = all_amis.filter(Filters=[dict(Name="tag-key", Values=["AegeaVersion"])])
amis = sorted(aegea_amis, key=lambda x: x.creation_date)
if len(amis) == 0:
amis = sorted(all_amis, key=lambda x: x.creation_date)
if not amis:
raise AegeaException("Could not resolve AMI {}".format(dict(tags, ami=ami)))
ami = amis[-1].id
return ami | 32495fb78a611f57b0e025b0ff68b51a190c7297 | 17,053 |
def _filter_colors(hcl, ihue, nhues, minsat):
"""
Filter colors into categories.
Parameters
----------
hcl : tuple
The data.
ihue : int
The hue column.
nhues : int
The total number of hues.
minsat : float
The minimum saturation used for the "grays" column.
"""
breakpoints = np.linspace(0, 360, nhues)
gray = hcl[1] <= minsat
if ihue == 0:
return gray
color = breakpoints[ihue - 1] <= hcl[0] < breakpoints[ihue]
if ihue == nhues - 1:
color = color or hcl[0] == breakpoints[ihue] # endpoint inclusive
return not gray and color | f7ca00bdd17766c859b5262c1b9ae12187c23222 | 17,054 |
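A quick numeric check of the filter, assuming HCL tuples of (hue in degrees, chroma, luminance):
nhues = 5                          # one gray column plus four hue columns
minsat = 10.0
for hcl in [(20.0, 50.0, 60.0),    # saturated, low hue -> lands in one hue column
            (200.0, 5.0, 60.0)]:   # nearly gray -> column 0 only
    cols = [ihue for ihue in range(nhues) if _filter_colors(hcl, ihue, nhues, minsat)]
    print(hcl, '->', cols)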
def SWO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "swo.owl", **kwargs
) -> Graph:
"""Return SWO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "swo.owl"
Version to retrieve
The available versions are:
- swo.owl
"""
return AutomaticallyRetrievedGraph(
"SWO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)() | 39936ffcd272e0d6c6c7e5510699bac68a465eb9 | 17,055 |
def upload_needed_files (handle, bucket, prefix, dir_path, kind, iter):
"""
upload the needed local files of a particular kind
"""
extension = f".{kind}"
count = 0
for uuid in iter:
file_name = uuid + extension
local_path = dir_path / file_name
grid_path = prefix + "/pub/" + kind + "/"
#print("uploading {} to {}".format(local_path, grid_path))
upload_file(handle, local_path.as_posix(), grid_path + file_name)
count += 1
return count | 9357e991eb14eaf9de54beda3ec86defc3e1ecaf | 17,056 |
def detect_tag(filename):
"""Return type and position of ID3v2 tag in filename.
Returns (tag_class, offset, length), where tag_class
is either Tag22, Tag23, or Tag24, and (offset, length)
is the position of the tag in the file.
"""
with fileutil.opened(filename, "rb") as file:
file.seek(0)
header = file.read(10)
file.seek(0)
if len(header) < 10:
raise NoTagError("File too short")
if header[0:3] != b"ID3":
raise NoTagError("ID3v2 tag not found")
if header[3] not in _tag_versions or header[4] != 0:
raise TagError("Unknown ID3 version: 2.{0}.{1}"
.format(*header[3:5]))
cls = _tag_versions[header[3]]
offset = 0
length = Syncsafe.decode(header[6:10]) + 10
if header[3] == 4 and header[5] & _TAG24_FOOTER:
length += 10
return (cls, offset, length) | 5b32c122d804aa5def21c59e73e4369c64b7cbbe | 17,057 |
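The tag length above is stored as a syncsafe integer (four bytes, 7 bits each, high bit always clear). A minimal stand-in for the Syncsafe.decode helper used above, shown only to illustrate the encoding:
def syncsafe_decode(data: bytes) -> int:
    value = 0
    for byte in data:
        if byte & 0x80:
            raise ValueError("invalid syncsafe byte")
        value = (value << 7) | byte   # each byte carries 7 bits, MSB first
    return value

print(syncsafe_decode(b'\x00\x00\x02\x01'))  # 2*128 + 1 = 257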
import numpy as np

import strax

def merge_peaks(peaks, start_merge_at, end_merge_at,
max_buffer=int(1e5)):
"""Merge specified peaks with their neighbors, return merged peaks
:param peaks: Record array of strax peak dtype.
:param start_merge_at: Indices to start merge at
:param end_merge_at: EXCLUSIVE indices to end merge at
:param max_buffer: Maximum number of samples in the sum_waveforms of
the resulting peaks (after merging).
Peaks must be constructed based on the properties of constituent peaks,
it being too time-consuming to revert to records/hits.
"""
assert len(start_merge_at) == len(end_merge_at)
new_peaks = np.zeros(len(start_merge_at), dtype=peaks.dtype)
# Do the merging. Could numbafy this to optimize, probably...
buffer = np.zeros(max_buffer, dtype=np.float32)
for new_i, new_p in enumerate(new_peaks):
old_peaks = peaks[start_merge_at[new_i]:end_merge_at[new_i]]
common_dt = np.gcd.reduce(old_peaks['dt'])
first_peak, last_peak = old_peaks[0], old_peaks[-1]
new_p['channel'] = first_peak['channel']
# The new endtime must be at or before the last peak endtime
# to avoid possibly overlapping peaks
new_p['time'] = first_peak['time']
new_p['dt'] = common_dt
new_p['length'] = \
(strax.endtime(last_peak) - new_p['time']) // common_dt
# re-zero relevant part of buffer (overkill? not sure if
# this saves much time)
buffer[:min(
int(
(
last_peak['time']
+ (last_peak['length'] * old_peaks['dt'].max())
- first_peak['time']) / common_dt
),
len(buffer)
)] = 0
for p in old_peaks:
# Upsample the sum waveform into the buffer
upsample = p['dt'] // common_dt
n_after = p['length'] * upsample
i0 = (p['time'] - new_p['time']) // common_dt
buffer[i0: i0 + n_after] = \
np.repeat(p['data'][:p['length']], upsample) / upsample
# Handle the other peak attributes
new_p['area'] += p['area']
new_p['area_per_channel'] += p['area_per_channel']
new_p['n_hits'] += p['n_hits']
new_p['saturated_channel'][p['saturated_channel'] == 1] = 1
# Downsample the buffer into new_p['data']
strax.store_downsampled_waveform(new_p, buffer)
new_p['n_saturated_channels'] = new_p['saturated_channel'].sum()
# Use the tight coincidence of the peak with the highest amplitude
i_max_subpeak = old_peaks['data'].max(axis=1).argmax()
new_p['tight_coincidence'] = old_peaks['tight_coincidence'][i_max_subpeak]
# If the endtime was in the peaks we have to recompute it here
# because otherwise it will stay set to zero due to the buffer
if 'endtime' in new_p.dtype.names:
new_p['endtime'] = strax.endtime(last_peak)
return new_peaks | 75f86b0c27cb2cac145234cfd9254105048be9a8 | 17,058 |
import numpy as np

def batchedpatternsgenerator(generatorfunction):
"""Decorator that assumes patterns (X,y) and stacks them in batches
This can be thought of a specialized version of the batchedgenerator
that assumes the base generator returns instances of data patterns,
as tuples of numpy arrays (X,y). When grouping them in batches the
numpy arrays are stacked so that each returned batch has a pattern
per row.
A "batchsize" parameter is added to the generator, that if specified
groups the data in batches of such size.
"""
def modgenerator(*args, **kwargs):
for batch in batchedgenerator(generatorfunction)(*args, **kwargs):
Xb, yb = zip(*batch)
yield np.stack(Xb), np.stack(yb)
return modgenerator | 19a8e8d5c2872c38d469c41e163a947f208fc806 | 17,059 |
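A self-contained usage sketch; the minimal batchedgenerator below is an assumption standing in for the real helper, which is not included in this snippet:
import numpy as np

def batchedgenerator(generatorfunction):
    # Hypothetical minimal version: groups yielded items into lists of size batchsize.
    def modgenerator(*args, batchsize=1, **kwargs):
        batch = []
        for item in generatorfunction(*args, **kwargs):
            batch.append(item)
            if len(batch) == batchsize:
                yield batch
                batch = []
        if batch:
            yield batch
    return modgenerator

@batchedpatternsgenerator
def patterns(n):
    for i in range(n):
        yield np.full(3, float(i)), np.array(i % 2)

for Xb, yb in patterns(5, batchsize=2):
    print(Xb.shape, yb.shape)   # (2, 3) (2,), (2, 3) (2,), then (1, 3) (1,)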
def reduce_min(raw_tensor, axis, keepdims=False):
"""
calculate reduce_min of raw_tensor, only support float16
Args:
raw_tensor (tvm.tensor.Tensor): input tensor
axis (Union[int, list]): reduce axis (range : [-len(raw_tensor.shape), len(raw_tensor.shape) - 1])
keepdims (bool): if true, retains reduced dimensions with length 1, default value is None
Returns:
tvm.tensor.Tensor, res
"""
return single_reduce_op(raw_tensor, axis, "reduce_min", keepdims) | b4473ca577a939f3c149758fd73a59c79b1f0db0 | 17,060 |
def align2local(seq):
"""
Returns list such that
'ATG---CTG-CG' ==> [0,1,2,2,2,3,4,5,5,6,7]
Used to go from align -> local space
"""
i = -1
lookup = []
for c in seq:
if c != "-":
i += 1
lookup.append(i)
return lookup | aa914a60d5db7801a3cf1f40e713e95c98cd647e | 17,061 |
def load_nodegraph(filename):
"""Load a nodegraph object from the given filename and return it.
Keyword argument:
filename -- the name of the nodegraph file
"""
nodegraph = _Nodegraph(1, [1])
nodegraph.load(filename)
return nodegraph | cd552fda874f1e8667bd09e95bdf43e6c5bd75c1 | 17,062 |
def get_bprop_sqrt(self):
"""Grad definition for `Sqrt` operation."""
mul_func = P.Mul()
fill_func = P.Fill()
div_op = P.RealDiv()
sqrt = P.Sqrt()
dtype = P.DType()
def bprop(x, out, dout):
temp = div_op(fill_func(dtype(x), shape_op(x), 0.5), sqrt(x))
dx = mul_func(dout, temp)
return (dx,)
return bprop | b297695effd9d063384b3343337d1647050b5f1a | 17,063 |
def classify_top1_batch(image):
"""Define method `classify_top1` for servable `resnet50`.
The input is `image` and the output is `lable`."""
x = register.add_stage(preprocess_batch, image, outputs_count=1, batch_size=1024)
x = register.add_stage(resnet_model, x, outputs_count=1)
x = register.add_stage(postprocess_top1, x, outputs_count=1)
return x | ff4ae67619f29e0e22e275845709ab73daabe2f0 | 17,064 |
from sklearn.feature_extraction.text import CountVectorizer

def ngram_word(max_features=2_000):
"""Word count vectorizer.
Args:
max_features: number of features to consider.
"""
return CountVectorizer(
ngram_range=(1, 3),
analyzer='word',
max_features=max_features,
) | 2b8935b72a836ff6ab3cdb0b17939806d9f7ce02 | 17,065 |
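A short illustrative run on a toy corpus:
corpus = ["the cat sat on the mat", "the dog sat on the log"]
vectorizer = ngram_word(max_features=20)
X = vectorizer.fit_transform(corpus)
print(X.shape)                            # (2, 20) -- capped by max_features
print(sorted(vectorizer.vocabulary_)[:5])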
def func_dispatcher(intent):
"""
Simple effect dispatcher that takes callables taking a box,
and calls them with the given box.
"""
def performer(dispatcher, intent, box):
intent(box)
return performer | 48dc23a8124569d5537c38b8f704fdea282853e8 | 17,066 |
import multiprocessing
from functools import partial

import numpy as np
from sklearn.neighbors import NearestNeighbors
from tqdm import tqdm
def encode(x, bps_arrangement='random', n_bps_points=512, radius=1.5, bps_cell_type='dists',
verbose=1, random_seed=13, x_features=None, custom_basis=None, n_jobs=-1):
"""Converts point clouds to basis point set (BPS) representation, multi-processing version
Parameters
----------
x: numpy array [n_clouds, n_points, n_dims]
batch of point clouds to be converted
bps_arrangement: str
supported BPS arrangements: "random", "grid", "custom"
n_bps_points: int
number of basis points
radius: float
radius for BPS sampling area
bps_cell_type: str
type of information stored in every BPS cell. Supported:
'dists': Euclidean distance to the nearest point in cloud
'deltas': delta vector from basis point to the nearest point
'closest': closest point itself
'features': return features of the closest point supplied by x_features.
e.g. RGB values of points, surface normals, etc.
verbose: boolean
whether to show conversion progress
x_features: numpy array [n_clouds, n_points, n_features]
point features that will be stored in BPS cells if return_values=='features'
custom_basis: numpy array [n_basis_points, n_dims]
custom basis to use
n_jobs: int
number of parallel jobs used for encoding. If -1, use all available CPUs
Returns
-------
x_bps: [n_clouds, n_points, n_bps_features]
point clouds converted to BPS representation.
"""
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
if n_jobs == 1:
n_clouds, n_points, n_dims = x.shape
if bps_arrangement == 'random':
basis_set = generate_random_basis(n_bps_points, n_dims=n_dims, radius=radius, random_seed=random_seed)
elif bps_arrangement == 'grid':
# in case of a grid basis, we need to find the nearest possible grid size
grid_size = int(np.round(np.power(n_bps_points, 1 / n_dims)))
basis_set = generate_grid_basis(grid_size=grid_size, minv=-radius, maxv=radius)
elif bps_arrangement == 'custom':
# in case of a custom basis, use the provided basis points directly
if custom_basis is not None:
basis_set = custom_basis
else:
raise ValueError("Custom BPS arrangement selected, but no custom_basis provided.")
else:
raise ValueError("Invalid basis type. Supported types: \'random\', \'grid\', \'custom\'")
n_bps_points = basis_set.shape[0]
if bps_cell_type == 'dists':
x_bps = np.zeros([n_clouds, n_bps_points])
elif bps_cell_type == 'deltas':
x_bps = np.zeros([n_clouds, n_bps_points, n_dims])
elif bps_cell_type == 'closest':
x_bps = np.zeros([n_clouds, n_bps_points, n_dims])
elif bps_cell_type == 'features':
n_features = x_features.shape[2]
x_bps = np.zeros([n_clouds, n_bps_points, n_features])
else:
raise ValueError("Invalid cell type. Supported types: \'dists\', \'deltas\', \'closest\', \'features\'")
fid_lst = range(0, n_clouds)
if verbose:
fid_lst = tqdm(fid_lst)
for fid in fid_lst:
nbrs = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm="ball_tree").fit(x[fid])
fid_dist, npts_ix = nbrs.kneighbors(basis_set)
if bps_cell_type == 'dists':
x_bps[fid] = fid_dist.squeeze()
elif bps_cell_type == 'deltas':
x_bps[fid] = x[fid][npts_ix].squeeze() - basis_set
elif bps_cell_type == 'closest':
x_bps[fid] = x[fid][npts_ix].squeeze()
elif bps_cell_type == 'features':
x_bps[fid] = x_features[fid][npts_ix].squeeze()
return x_bps
else:
if verbose:
print("using %d available CPUs for BPS encoding.." % n_jobs)
bps_encode_func = partial(encode, bps_arrangement=bps_arrangement, n_bps_points=n_bps_points, radius=radius,
bps_cell_type=bps_cell_type, verbose=verbose, random_seed=random_seed,
x_features=x_features, custom_basis=custom_basis, n_jobs=1)
pool = multiprocessing.Pool(n_jobs)
x_chunks = np.array_split(x, n_jobs)
x_bps = np.concatenate(pool.map(bps_encode_func, x_chunks), 0)
pool.close()
return x_bps | 66edc2dd5d42fe53e55f2e5b95e2069123510006 | 17,068 |
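A small single-process usage sketch, assuming the module-level helpers the function references (generate_random_basis, etc.) are importable alongside it:
import numpy as np

x = np.random.normal(size=(4, 100, 3))        # 4 clouds, 100 points each, 3-D
x_bps = encode(x, bps_arrangement='random', n_bps_points=64,
               bps_cell_type='dists', verbose=0, n_jobs=1)
print(x_bps.shape)                            # (4, 64)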
def parsec_params_list_to_dict(var):
"""
convert parsec parameter array to dictionary
:param var:
:return:
"""
parsec_params = dict()
parsec_params["rle"] = var[0]
parsec_params["x_pre"] = var[1]
parsec_params["y_pre"] = var[2]
parsec_params["d2ydx2_pre"] = var[3]
parsec_params["th_pre"] = var[4]
parsec_params["x_suc"] = var[5]
parsec_params["y_suc"] = var[6]
parsec_params["d2ydx2_suc"] = var[7]
parsec_params["th_suc"] = var[8]
return parsec_params | 4ea4b4d2c0cbcb8fb49619e103b09f354c80de6a | 17,069 |
def parse_msiinfo_suminfo_output(output_string):
"""
Return a dictionary containing information from the output of `msiinfo suminfo`
"""
# Split lines by newline and place lines into a list
output_list = output_string.splitlines()
results = {}
# Partition lines by the leftmost ":", use the string to the left of ":" as
# the key and use the string to the right of ":" as the value
for output in output_list:
key, _, value = output.partition(':')
if key:
results[key] = value.strip()
return results | 6883e8fba9a37b9f877bdf879ebd14d1120eb88a | 17,070 |
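For example, with a string shaped like msiinfo suminfo output (illustrative text, not captured tool output):
sample = "Title: Installation Database\nAuthor: Example Corp\nPage count: 200"
print(parse_msiinfo_suminfo_output(sample))
# {'Title': 'Installation Database', 'Author': 'Example Corp', 'Page count': '200'}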
from typing import Dict
from typing import Any
import json
from datetime import datetime
def create_indicators_fields(tag_details: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns the indicator fields
Args:
tag_details: a dictionary containing the tag details.
Returns:
A dictionary represents the indicator fields.
"""
fields: Dict[str, Any] = {}
tag = tag_details.get('tag', {})
refs = json.loads(tag.get('refs', '[]'))
fields['publications'] = create_publications(refs)
fields['aliases'] = tag_details.get('aliases', [])
fields['description'] = tag.get('description', '')
last_hit = tag.get('lasthit', '')
fields['lastseenbysource'] = datetime.strptime(last_hit, AF_TAGS_DATE_FORMAT).strftime(
DATE_FORMAT) if last_hit else None
updated_at = tag.get('updated_at', '')
fields['updateddate'] = datetime.strptime(updated_at, AF_TAGS_DATE_FORMAT).strftime(
DATE_FORMAT) if updated_at else None
fields['reportedby'] = tag.get('source', '')
remove_nulls_from_dictionary(fields)
return fields | 349ab542d2c25cb24fe40aeb98c16a9bfccc871f | 17,071 |
import geopandas as gpd
from geopandas import GeoDataFrame

def spatial_difference(gdf1: GeoDataFrame, gdf2: GeoDataFrame) -> GeoDataFrame:
"""Removes polygons from the first GeoDataFrame that intersect with polygons from the second GeoDataFrame
:param gdf1: First input data frame
:param gdf2: Second input data frame
:return: Resulting data frame
"""
gdf2 = gdf2[["geometry"]]
intersections = gpd.sjoin(gdf1, gdf2, how="left")
result_gdf = intersections[intersections["index_right"].isna()]
result_gdf = result_gdf.drop(columns=["index_right"])
return result_gdf | 2713376f45ed574399f9f406a06a60a47f002579 | 17,072 |
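A toy example with two small frames, assuming geopandas and shapely are installed:
import geopandas as gpd
from shapely.geometry import box

gdf_a = gpd.GeoDataFrame({'name': ['a', 'b']},
                         geometry=[box(0, 0, 1, 1), box(5, 5, 6, 6)])
gdf_b = gpd.GeoDataFrame(geometry=[box(0.5, 0.5, 2, 2)])
print(spatial_difference(gdf_a, gdf_b))   # only polygon 'b', which intersects nothing, remains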
def frustumShellIxx(rb, rt, t, h, diamFlag=False):
"""This function returns a frustum's mass-moment of inertia (divided by density) about the
transverse x/y-axis passing through the center of mass with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
Returns
-------
Ixx=Iyy : float (scalar/vector), Moment of inertia about x/y-axis through center of mass (principle axes)
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*dr*dz from r=ri(z) to ro(z), z=0 to h
rb_o = rb
rb_i = rb-t
rt_o = rt
rt_i = rt-t
return (frustumIxx(rb_o, rt_o, h) - frustumIxx(rb_i, rt_i, h)) | 3d23805d4f7ed952b50752ac4ab8158c2826137f | 17,073 |
def default_shaders():
"""
Returns a list with all the default shaders of the current DCC
:return: str
"""
return shader_utils.get_default_shaders() | 795ca337c9ba163bb70ce4ff226c04d1034ec542 | 17,074 |
def get_domains_and_slugs():
"""
returns all the domain names and slugs as dictionary
{domain_name: slug}
"""
return_data = {}
domain_slugs = Domain.objects.filter(active=1).order_by('name')
if domain_slugs:
for domain in domain_slugs:
return_data[domain.name] = domain.slug
return return_data | d19af879fe96895808f1c1815d3cc563499d358d | 17,075 |
def has_video_ads() -> bool:
"""has_video_ads() -> bool
(internal)
"""
return bool() | 6b4822bb18171df5bfc5b4f3797e574557cd65dd | 17,076 |
from itertools import chain, starmap
from typing import Iterable

import numpy as np
def compile_sites(inp: NetInput,
y_true: Iterable[np.ndarray],
y_pred: Iterable[np.ndarray],
masks: Iterable[np.ndarray]):
"""
Prepares sites to be dumped in tsv file
:param inp: NetInput
:param y_true: True known classes mapped on templates
:param y_pred: True predicted classes mapped on templates
:param masks: boolean numpy arrays with
True placed at positions of any class that
could be positive
:return: Iterable over Sites
"""
positions = (np.where(y > 0)[0] + 1 for y in masks)
def comp_site(id_, pos, cls_pred, cls_true):
site = [id_, pos, 0, 0]
if cls_pred:
site[2] = 1
if cls_true:
site[3] = 1
return Site(*site)
sites = chain.from_iterable(
((id_, pos, p, t) for pos, p, t in zip(pp, yp, yt))
for id_, pp, yp, yt in zip(inp.ids, positions, y_pred, y_true))
return starmap(comp_site, sites) | 14f655e18b5651c22373d4c23b51f55704cd63c8 | 17,078 |
import numpy as np

def femda_estimator(X, labels, eps = 1e-5, max_iter = 20):
""" Estimates the matrix of means and the tensor of scatter matrix of the dataset using MLE estimator.
To tackle singular matrix issues, we use regularization.
Parameters
----------
X : 2-d array of size n*m
matrix of all the samples generated
labels : 1-d array of size n
vector of the label of each sample
eps : float > 0
criterion of termination when solving the fixed-point equation
max_iter : integer > 1
number of maximum iterations to solve the fixed-point equation
Returns
-------
means : 2-d array of size K*m
matrix of the robust estimation of the mean of the K clusters
shapes : 3-d array of size K*m*m
tensor of the robust estimation of shape matrix of the K clusters
"""
n, m = X.shape
K = int(max(set(labels)) + 1)
n_clusters = np.zeros(K) + 1e-5
for i in range(n):
n_clusters[int(labels[i])] = n_clusters[int(labels[i])] + 1
means, shapes = classic_estimator(X, labels)
for k in range(K):
convergence = False
ite = 1
while (not convergence) and ite<max_iter:
ite = ite + 1
mean = np.zeros(m)
shape = np.zeros([m,m])
sum_mean_weights = 1e-5
for i in range(n):
if labels[i] == k:
mean_weight = min([[0.5]], 1 / np.dot(np.array([X[i]-means[k]]), np.dot(np.linalg.inv(regularize(shapes[k])), np.array([X[i]-means[k]]).T)))[0][0]
#print(mean_weight)
mean = mean + mean_weight * X[i]
sum_mean_weights = sum_mean_weights + mean_weight
shape = shape + np.dot(np.array([X[i]-means[k]]).T, np.array([X[i]-means[k]])) * mean_weight
delta_mean = mean / sum_mean_weights - means[k]
delta_shape = shape * m / n_clusters[k] - shapes[k]
means[k] = means[k] + delta_mean
shapes[k] = shapes[k] + delta_shape
print("trace at", ite, np.trace(shapes[k]))
convergence = sum(abs(delta_mean)) + sum(sum(abs(delta_shape))) < eps
shapes[k] = regularize(shapes[k])
return means, shapes | 639532f9307e023561d6193730473533b240fb28 | 17,079 |
def get_collections():
"""read .db file, return raw collection"""
col = {}
f = open(collection_db, "rb")
version = nextint(f)
ncol = nextint(f)
for i in range(ncol):
colname = nextstr(f)
col[colname] = []
for j in range(nextint(f)):
f.read(2)
col[colname].append(f.read(32).decode("utf-8"))
f.close()
return (col, version) | b134e7e970fa7f5486226d2c2cab3c63ab9f67c3 | 17,080 |
def ot_has_small_bandgap(cp2k_input, cp2k_output, bandgap_thr_ev):
""" Returns True if the calculation used OT and had a smaller bandgap then the guess needed for the OT.
(NOTE: It has been observed also negative bandgap with OT in CP2K!)
cp2k_input: dict
cp2k_output: dict
bandgap_thr_ev: float [eV]
"""
list_true = [True, 'T', 't', '.TRUE.', 'True', 'true'] #add more?
try:
ot_settings = cp2k_input['FORCE_EVAL']['DFT']['SCF']['OT']
if '_' not in ot_settings.keys() or ot_settings['_'] in list_true: #pylint: disable=simplifiable-if-statement
using_ot = True
else:
using_ot = False
except KeyError:
using_ot = False
min_bandgap_ev = min(cp2k_output["bandgap_spin1_au"], cp2k_output["bandgap_spin2_au"]) * HARTREE2EV
is_bandgap_small = (min_bandgap_ev < bandgap_thr_ev)
return using_ot and is_bandgap_small | fbc63c373d052111932ea0fd2cd458d59b486d10 | 17,081 |
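An illustrative call, assuming the module-level HARTREE2EV constant (about 27.211 eV/Hartree) that the function relies on:
cp2k_input = {'FORCE_EVAL': {'DFT': {'SCF': {'OT': {'_': True}}}}}
cp2k_output = {'bandgap_spin1_au': 0.002, 'bandgap_spin2_au': 0.003}
# 0.002 Ha is roughly 0.054 eV, below the 0.1 eV threshold -> True
print(ot_has_small_bandgap(cp2k_input, cp2k_output, bandgap_thr_ev=0.1))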
def profile():
"""Checking if user is already logged_in"""
if 'logged_in' in session:
'''getting all the account info for the user for displaying it on the profile page'''
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM accounts WHERE username = %s', (session['employee_uname'],))
account = cursor.fetchone()
'''Showing profile page with account info to the employee'''
return render_template('profile.html', acc=account)
'''if User is not logged_in redirect to login page'''
return redirect(url_for('login')) | 4796058d3bbc911cc0610b7a5458be80fa330d67 | 17,082 |
import time
import json
async def ping(ws):
"""Send a ping request on an established websocket connection.
:param ws: an established websocket connection
:return: the ping response
"""
ping_request = {
'emit': "ping",
'payload': {
'timestamp': int(time.time())
}
}
await ws.send(json.dumps(ping_request))
return json.loads(await ws.recv()) | 587d2a72cbc5f50f0ffb0bda63668a0ddaf4c9c3 | 17,084 |
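One way to call it with the websockets client library; the URL is a placeholder:
import asyncio
import websockets

async def main():
    async with websockets.connect("ws://localhost:8765") as ws:
        print(await ping(ws))

asyncio.run(main())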
def split_files_each_proc(file_arr,nprocs):
""" Returns array that distributes samples across all processors. """
ntot = len(file_arr)
post_proc_file_arr = []
for i in range(0,nprocs):
each_proc_arr = []
ib,ie = split_array_old(ntot,nprocs,i)
if i == 0:
max_no = (ie-ib)+1
for j in range(ib,ie+1):
each_proc_arr.append(j)
if len(each_proc_arr) > max_no:
max_no = len(each_proc_arr)
elif len(each_proc_arr) < max_no :
for k in range(0,max_no-(len(each_proc_arr))):
each_proc_arr.append("no file")
max_no = len(each_proc_arr)
post_proc_file_arr.append(each_proc_arr)
return post_proc_file_arr | 0c5c481d1b9a9e0d5c6efdfb7abf6669f5a05ecf | 17,085 |
def get_data_generators_for_output(output):
""" Get the data generators involved in an output
Args:
output (:obj:`Output`): report or plot
Returns:
:obj:`set` of :obj:`DataGenerator`: data generators involved in the output
"""
data_generators = set()
if isinstance(output, Report):
for data_set in output.data_sets:
data_generators.add(data_set.data_generator)
elif isinstance(output, Plot2D):
for curve in output.curves:
data_generators.add(curve.x_data_generator)
data_generators.add(curve.y_data_generator)
elif isinstance(output, Plot3D):
for surface in output.surfaces:
data_generators.add(surface.x_data_generator)
data_generators.add(surface.y_data_generator)
data_generators.add(surface.z_data_generator)
else:
raise NotImplementedError('Output of type {} is not supported.'.format(output.__class__.__name__))
if None in data_generators:
data_generators.remove(None)
return data_generators | d05fde5b5ce25504b53d8ca4491235d3ab3b8680 | 17,086 |
import pandas as pd
from sklearn import metrics as M
def clustering_report(y_true, y_pred) -> pd.DataFrame:
"""
Generate cluster evaluation metrics.
Args:
y_true: Array of actual labels
y_pred: Array of predicted clusters
Returns:
Pandas DataFrame with metrics.
"""
return pd.DataFrame(
{
"Homogeneity": M.homogeneity_score(y_true, y_pred),
"Completeness": M.completeness_score(y_true, y_pred),
"V-Measure": M.v_measure_score(y_true, y_pred),
"Adjusted Rand Index": M.adjusted_rand_score(y_true, y_pred),
"Adjusted Mutual Information": M.adjusted_mutual_info_score(y_true, y_pred),
},
index=["value"],
).T | dc124dc4f248a2acedfd6201a205f285adc6ec1c | 17,088 |
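For instance, scoring k-means labels against ground-truth labels on a toy blob dataset (assuming scikit-learn is installed):
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, y_true = make_blobs(n_samples=300, centers=3, random_state=0)
y_pred = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
print(clustering_report(y_true, y_pred))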
from typing import List
from typing import Sequence
from typing import Set
from typing import Tuple
def _create_sampler_data(
datastores: List[Datastore], variables: Sequence[Variable],
preconditions: Set[LiftedAtom], add_effects: Set[LiftedAtom],
delete_effects: Set[LiftedAtom], param_option: ParameterizedOption,
datastore_idx: int
) -> Tuple[List[SamplerDatapoint], List[SamplerDatapoint]]:
"""Generate positive and negative data for training a sampler."""
# Populate all positive data.
positive_data: List[SamplerDatapoint] = []
for (segment, var_to_obj) in datastores[datastore_idx]:
option = segment.get_option()
state = segment.states[0]
if CFG.sampler_learning_use_goals:
# Right now, we're making the assumption that all data is
# demonstration data when we're learning samplers with goals.
# In the future, we may weaken this assumption.
goal = segment.get_goal()
else:
goal = None
assert all(
pre.predicate.holds(state, [var_to_obj[v] for v in pre.variables])
for pre in preconditions)
positive_data.append((state, var_to_obj, option, goal))
# Populate all negative data.
negative_data: List[SamplerDatapoint] = []
if CFG.sampler_disable_classifier:
# If we disable the classifier, then we never provide
# negative examples, so that it always outputs 1.
return positive_data, negative_data
for idx, datastore in enumerate(datastores):
for (segment, var_to_obj) in datastore:
option = segment.get_option()
state = segment.states[0]
if CFG.sampler_learning_use_goals:
# Right now, we're making the assumption that all data is
# demonstration data when we're learning samplers with goals.
# In the future, we may weaken this assumption.
goal = segment.get_goal()
else:
goal = None
trans_add_effects = segment.add_effects
trans_delete_effects = segment.delete_effects
if option.parent != param_option:
continue
var_types = [var.type for var in variables]
objects = list(state)
for grounding in utils.get_object_combinations(objects, var_types):
if len(negative_data
) >= CFG.sampler_learning_max_negative_data:
# If we already have more negative examples
# than the maximum specified in the config,
# we don't add any more negative examples.
return positive_data, negative_data
# If we are currently at the datastore that we're learning a
# sampler for, and this datapoint matches the positive
# grounding, this was already added to the positive data, so
# we can continue.
if idx == datastore_idx:
positive_grounding = [var_to_obj[var] for var in variables]
if grounding == positive_grounding:
continue
sub = dict(zip(variables, grounding))
# When building data for a datastore with effects X, if we
# encounter a transition with effects Y, and if Y is a superset
# of X, then we do not want to include the transition as a
# negative example, because if Y was achieved, then X was also
# achieved. So for now, we just filter out such examples.
ground_add_effects = {e.ground(sub) for e in add_effects}
ground_delete_effects = {e.ground(sub) for e in delete_effects}
if ground_add_effects.issubset(trans_add_effects) and \
ground_delete_effects.issubset(trans_delete_effects):
continue
# Add this datapoint to the negative data.
negative_data.append((state, sub, option, goal))
return positive_data, negative_data | ecf7ed06183264722df5d6e2d645bb899cb8358b | 17,089 |