content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def get_rdf_lables(obj_list):
"""Get rdf:labels from a given list of objects."""
rdf_labels = []
for obj in obj_list:
rdf_labels.append(obj['rdf:label'])
return rdf_labels | 2bcf6a6e8922e622de602f5956747955ea39eeda | 4,700 |
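A minimal usage sketch (assuming the function above is importable as written, misspelling and all; the sample objects below are made up):

```python
# Hypothetical sample data: any dicts carrying an 'rdf:label' key will do.
obj_list = [
    {"rdf:label": "Person", "rdf:type": "owl:Class"},
    {"rdf:label": "knows", "rdf:type": "owl:ObjectProperty"},
]
print(get_rdf_lables(obj_list))  # ['Person', 'knows']
```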
import json
def _create_model_fn(pipeline_proto, is_chief=True):
"""Creates a callable that build the model.
Args:
pipeline_proto: an instance of pipeline_pb2.Pipeline.
Returns:
model_fn: a callable that takes [features, labels, mode, params] as inputs.
"""
if not isinstance(pipeline_proto, pipeline_pb2.Pipeline):
raise ValueError('pipeline_proto has to be an instance of Pipeline.')
def _model_fn(features, labels, mode, params):
"""
Args:
features: a dict mapping from names to tensors, denoting the features.
labels: a dict mapping from names to tensors, denoting the labels.
mode: mode parameter required by the estimator.
params: additional parameters used for creating the model.
Returns:
an instance of EstimatorSpec.
"""
is_training = (tf.estimator.ModeKeys.TRAIN == mode)
tf.logging.info("Current mode is %s, is_training=%s", mode, is_training)
model = builder.build(pipeline_proto.model, is_training)
predictions = model.build_prediction(features)
# Get scaffold and variables_to_train.
scaffold = model.get_scaffold()
variables_to_train = model.get_variables_to_train()
# Compute losses. Note: variables created in build_loss are not trainable.
losses = model.build_loss(predictions, examples=features)
for name, loss in losses.items():
tf.losses.add_loss(loss)
tf.summary.scalar('loss/' + name, loss)
for loss in tf.losses.get_regularization_losses():
tf.summary.scalar(
"loss/regularization/" + '/'.join(loss.op.name.split('/')[:2]), loss)
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
train_op = None
eval_metric_ops = None
training_hooks = []
if tf.estimator.ModeKeys.TRAIN == mode:
train_config = pipeline_proto.train_config
# Create the optimizer.
learning_rate = train_config.learning_rate
global_step = tf.train.get_or_create_global_step()
if train_config.HasField('learning_rate_decay'):
learning_rate = tf.train.exponential_decay(
learning_rate,
global_step,
train_config.learning_rate_decay.decay_steps,
train_config.learning_rate_decay.decay_rate,
staircase=train_config.learning_rate_decay.staircase)
tf.summary.scalar('loss/learning_rate', learning_rate)
optimizer = training_utils.build_optimizer(
train_config.optimizer, learning_rate=learning_rate)
# Setup the replicas_hook for the SyncReplicasOptimizer.
if train_config.sync_replicas:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer, replicas_to_aggregate=4)
sync_replicas_hook = optimizer.make_session_run_hook(is_chief)
training_hooks.append(sync_replicas_hook)
# Enable MovingAverageOptimizer if specified.
if train_config.HasField('moving_average_decay'):
optimizer = tf.contrib.opt.MovingAverageOptimizer(
optimizer, average_decay=train_config.moving_average_decay)
# Apply gradient multipliers.
trainable_variables = []
gradient_multipliers = {}
for var in variables_to_train:
add_to_trainable_variables = True
for multiplier in train_config.gradient_multiplier:
if var.op.name.startswith(multiplier.scope):
if var.op.name in gradient_multipliers:
tf.logging.warn('Override gradient multiplier: %s', var.op.name)
gradient_multipliers[var.op.name] = multiplier.multiplier
if multiplier.multiplier > 0:
add_to_trainable_variables = True
else:
add_to_trainable_variables = False
# Add to trainable variables.
if add_to_trainable_variables:
trainable_variables.append(var)
tf.logging.info('Variable to train: %s, %s', var.op.name,
var.get_shape())
elif var.op.name in gradient_multipliers:
del gradient_multipliers[var.op.name]
tf.logging.info('Apply gradient multipliers: \n%s',
json.dumps(gradient_multipliers, indent=2))
def transform_grads_fn(grads):
if gradient_multipliers:
grads = tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
if train_config.HasField('max_gradient_norm'):
grads = tf.contrib.training.clip_gradient_norms(
grads, max_norm=train_config.max_gradient_norm)
return grads
# The train_op is required for mode `TRAIN`.
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
variables_to_train=trainable_variables,
transform_grads_fn=transform_grads_fn,
summarize_gradients=True)
if train_config.HasField('moving_average_decay'):
scaffold = tf.train.Scaffold(
saver=optimizer.swapping_saver(), copy_from_scaffold=scaffold)
elif tf.estimator.ModeKeys.EVAL == mode:
# The eval_metric_ops is optional for mode `EVAL`.
eval_metric_ops = model.build_evaluation(predictions, examples=features)
elif tf.estimator.ModeKeys.PREDICT == mode:
# The predictions is required for mode `PREDICT`.
predictions.update(features)
predictions.update({'summary': tf.summary.merge_all()})
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
training_hooks=training_hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
return _model_fn | f29e86a0bc1355a7cf509e57ad0262bc5a9ca1e5 | 4,701 |
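A rough sketch of how the returned callable would typically be wired into a TF 1.x Estimator; `pipeline_proto`, `train_input_fn`, and the model directory are placeholders, not part of the record above:

```python
# Sketch only: assumes a parsed pipeline_pb2.Pipeline proto and a train_input_fn exist.
model_fn = _create_model_fn(pipeline_proto, is_chief=True)
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir='/tmp/model_dir')
estimator.train(input_fn=train_input_fn, max_steps=100000)
```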
def boolean_automatic(meshes, operation, **kwargs):
"""
Automatically pick an engine for booleans based on availability.
Parameters
--------------
meshes : list of Trimesh
Meshes to be booleaned
operation : str
Type of boolean, i.e. 'union', 'intersection', 'difference'
Returns
---------------
result : trimesh.Trimesh
Result of boolean operation
"""
if interfaces.blender.exists:
result = interfaces.blender.boolean(meshes, operation, **kwargs)
elif interfaces.scad.exists:
result = interfaces.scad.boolean(meshes, operation, **kwargs)
else:
raise ValueError('No backends available for boolean operations!')
return result | 7e5b1a483862bb05bb4cd78d21ec22c835f218e6 | 4,702 |
from .workflow import WorkSpec
def get_context(work=None):
"""Get a concrete Context object.
Args:
work (gmx.workflow.WorkSpec): runnable work as a valid gmx.workflow.WorkSpec object
Returns:
An object implementing the :py:class:`gmx.context.Context` interface, if possible.
Raises:
gmx.exceptions.ValueError if an appropriate context for ``work`` could not be loaded.
If work is provided, return a Context object capable of running the provided work or produce an error.
The semantics for finding Context implementations needs more consideration, and a more informative exception
is likely possible.
A Context can run the provided work if
* the Context can resolve all operations specified in the elements
* the Context supports DAG topologies implied by the network of dependencies
* the Context supports features required by the elements with the specified parameters,
such as synchronous array jobs.
"""
# We need to define an interface for WorkSpec objects so that we don't need
# to rely on typing and inter-module dependencies.
workspec = None
if work is not None:
if isinstance(work, WorkSpec):
workspec = work
elif hasattr(work, 'workspec') and isinstance(work.workspec,
WorkSpec):
workspec = work.workspec
else:
raise exceptions.ValueError('work argument must provide a gmx.workflow.WorkSpec.')
if workspec is not None and \
hasattr(workspec, '_context') and \
workspec._context is not None:
context = workspec._context
else:
context = Context(work=workspec)
return context | 838de2ce25dbe44c058f5360a59e48a68fa7dc2a | 4,703 |
def test_data():
"""Get the `CIFAR-10` test data."""
global _MEAN # pylint: disable=global-statement
_np.random.seed(1)
view = _skdc10.view.OfficialImageClassificationTask()
permutation = _np.random.permutation(range(10000))
if _MEAN is None:
_MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)
return ((view.test.x[:10000, :][permutation, :] - _MEAN).
transpose((0, 3, 1, 2)).astype('float32'),
view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32')) | e20acfc0e46dba2441b03d0d1443fc193c500e62 | 4,704 |
def normalize_key_combo(key_combo):
"""Normalize key combination to make it easily comparable.
All aliases are converted and modifier orders are fixed to:
Control, Alt, Shift, Meta
Letters will always be read as upper-case.
Due to the native implementation of the key system, Shift pressed in
certain key combinations may yield inconsistent or unexpected results.
Therefore, it is not recommended to use Shift with non-letter keys. On OSX,
Control is swapped with Meta such that pressing Command reads as Control.
Parameters
----------
key_combo : str
Key combination.
Returns
-------
normalized_key_combo : str
Normalized key combination.
"""
key, modifiers = parse_key_combo(key_combo)
if len(key) != 1 and key not in SPECIAL_KEYS:
raise TypeError(f'invalid key {key}')
for modifier in modifiers:
if modifier not in MODIFIER_KEYS:
raise TypeError(f'invalid modifier key {modifier}')
return components_to_key_combo(key, modifiers) | e242c6d9177d31c60a534e9734917c6fdf2de9f7 | 4,705 |
def shape_to_np(shape, dtype="int"):
"""
Used to convert from a shape object returned by dlib to an np array
"""
return np.array([[shape.part(i).x, shape.part(i).y] for i in range(68)], dtype=dtype) | 6d3d0205a8ac90dc8fb17b844fd5e150e25bdde1 | 4,706 |
def inet_pton(space, address):
""" Converts a human readable IP
address to its packed in_addr representation"""
n = rsocket.inet_pton(rsocket.AF_INET, address)
return space.newstr(n) | d015f76ab252e8f1f9f8f764bb7a2131f9ca9b92 | 4,707 |
def delete_routing_segmentation_maps_from_source_segment(
self,
segment_id: int,
) -> bool:
"""Delete D-NAT policies for specific source segment
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - vrf
- DELETE
- /vrf/config/maps/{srcSegmentId}
:param segment_id: Numeric id of routing segment
:type segment_id: int
:return: Returns True/False based on successful call
:rtype: bool
"""
return self._delete(
"/vrf/config/maps/{}".format(segment_id),
expected_status=[204],
return_type="bool",
) | 32064ca159928ccc0802791e161a614f3303555f | 4,708 |
import os
import pandas as pd
def load_bounding_boxes(dataset_dir):
"""
Load bounding boxes and return a dictionary of file names and corresponding bounding boxes
"""
# Paths
bounding_boxes_path = os.path.join(dataset_dir, 'bounding_boxes.txt')
file_paths_path = os.path.join(dataset_dir, 'images.txt')
# Read bounding_boxes.txt and images.txt file
df_bounding_boxes = pd.read_csv(bounding_boxes_path,
delim_whitespace=True, header=None).astype(int)
df_file_names = pd.read_csv(file_paths_path, delim_whitespace=True, header=None)
# Create a list of file names
file_names = df_file_names[1].tolist()
# Create a dictionary of file_names and bounding boxes
filename_boundingbox_dict = {img_file[:-4]: [] for img_file in file_names[:2]}
# Assign a bounding box to the corresponding image
for i in range(0, len(file_names)):
# Get the bounding box
bounding_box = df_bounding_boxes.iloc[i][1:].tolist()
key = file_names[i][:-4]
filename_boundingbox_dict[key] = bounding_box
return filename_boundingbox_dict | ed6e4b1d049da25dc975fcd1406e4c17dbe09a70 | 4,709 |
def _identifier(name):
"""
:param name: string
:return: name in lower case and with '_' instead of '-'
:rtype: string
"""
if name.isidentifier():
return name
return name.lower().lstrip('0123456789. ').replace('-', '_') | fbbbc9dd3f2bc5b6e43520c0685f63a10ee95f0a | 4,710 |
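A few concrete cases (assuming `_identifier` above is in scope); the expected values follow directly from the lower/lstrip/replace chain:

```python
assert _identifier("valid_name") == "valid_name"      # already an identifier: returned unchanged
assert _identifier("Content-Type") == "content_type"  # lower-cased, '-' replaced with '_'
assert _identifier("9-foo-bar") == "_foo_bar"         # leading digit stripped before the replacement
```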
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> print(np.roots(coeff))
[-0.3125+0.46351241j -0.3125-0.46351241j]
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots | 02e3f37a81c84aac9ac949662ec64b85e24432c9 | 4,711 |
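A self-contained check of the companion-matrix construction used above, compared against `np.roots` on the docstring's coefficients (NumPy only):

```python
import numpy as np

p = np.array([3.2, 2.0, 1.0])
N = len(p)
A = np.diag(np.ones(N - 2), -1)   # ones on the sub-diagonal
A[0, :] = -p[1:] / p[0]           # first row holds the normalized coefficients
companion_roots = np.linalg.eigvals(A)
assert np.allclose(np.sort_complex(companion_roots), np.sort_complex(np.roots(p)))
```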
from typing import List
def calculate_trade_from_swaps(
swaps: List[AMMSwap],
trade_index: int = 0,
) -> AMMTrade:
"""Given a list of 1 or more AMMSwap (swap) return an AMMTrade (trade).
The trade is calculated using the first swap token (QUOTE) and last swap
token (BASE). Be aware that any token data in between will be ignored for
calculating the trade.
Examples:
[USDC -> AMPL] BASE_QUOTE pair is AMPL_USDC.
[USDC -> AMPL, AMPL -> WETH] BASE_QUOTE pair is WETH_USDC.
[USDC -> AMPL, AMPL -> WETH, WETH -> USDC] BASE_QUOTE pair is USDC_USDC.
May raise DeserializationError
"""
assert len(swaps) != 0, "Swaps can't be an empty list here"
if swaps[0].amount0_in == ZERO:
# Prevent a division by zero error when creating the trade.
# Swaps with `tokenIn` amount (<AMMSwap>.amount0_in) equals to zero are
# not expected nor supported. The function `deserialize_swap` will raise
# a DeserializationError, preventing to store them in the DB. In case
# of having a zero amount it means the db data was corrupted.
log.error(
'Failed to deserialize swap from db. First swap amount0_in is zero',
swaps=swaps,
)
raise DeserializationError('First swap amount0_in is zero.')
amm_trade = AMMTrade(
trade_type=TradeType.BUY, # AMMTrade is always a buy
base_asset=swaps[-1].token1,
quote_asset=swaps[0].token0,
amount=swaps[-1].amount1_out,
rate=Price(swaps[0].amount0_in / swaps[-1].amount1_out),
swaps=swaps,
trade_index=trade_index,
)
return amm_trade | 55071041fd0cab3fd2c0cb89f24cd9267a4e164a | 4,712 |
import nltk
def tokenize(s):
"""
Tokenize a string.
Args:
s: String to be tokenized.
Returns:
A list of words as the result of tokenization.
"""
#return s.split(" ")
return nltk.word_tokenize(s) | 8dcc01364b3442539dbcc979d3238492bb7904d1 | 4,713 |
import datetime
def evaluate(request):
"""Eval view that shows how many times each entry was tracked"""
# default filter
end_date = datetime.date.today()
start_date = datetime.date(year=end_date.year, month=end_date.month - 1, day=end_date.day)
num_entries = 5
# get custom filter values from form
if request.method == 'POST':
form = PlotForm(request.POST)
if form.is_valid():
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
num_entries = form.cleaned_data['num_entries']
# or load empty form
else:
form = PlotForm(initial={'start_date': start_date, 'end_date': end_date, 'num_entries': num_entries})
# prepare chart data
labels = []
chart_data = []
entry_counts = most_frequent_entries(request.user, start_date, end_date, number=num_entries)
for entry, count in entry_counts.items():
labels.append(entry)
chart_data.append(count)
context = {
'form': form,
# for chart.js
'labels': labels,
'chart_label': 'Num. Entries',
'chart_data': chart_data,
'chart_title': f'Top {num_entries} Most Common Entries',
}
return render(request, 'app/eval.html', context) | 44708b65846fd9e21ebc7baf1fe0377054ae2221 | 4,714 |
def plot_af_correlation(vf1, vf2, ax=None, figsize=None):
"""
Create a scatter plot showing the correlation of allele frequency between
two VCF files.
This method will exclude the following sites:
- non-overlapping sites
- multiallelic sites
- sites with one or more missing genotypes
Parameters
----------
vf1, vf2 : VcfFrame
VcfFrame objects to be compared.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
:context: close-figs
>>> from fuc import pyvcf, common
>>> import matplotlib.pyplot as plt
>>> data1 = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103, 104, 105],
... 'ID': ['.', '.', '.', '.', '.', '.'],
... 'REF': ['G', 'T', 'G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'C', 'G,A', 'C', 'T'],
... 'QUAL': ['.', '.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.', '.'],
... 'FORMAT': ['GT:DP', 'GT', 'GT', 'GT', 'GT', 'GT'],
... 'A': ['0/1:30', '0/0', '1/1', '0/1', '1/1', '0/1'],
... 'B': ['0/0:30', '0/0', '0/1', '0/1', '1/1', '0/1'],
... 'C': ['1/1:30', '0/0', '1/1', '0/1', '1/1', '0/1'],
... 'D': ['0/0:30', '0/0', '0/0', '0/0', '1/1', '0/1'],
... 'E': ['0/0:30', '0/0', '0/0', '1/2', '1/1', '0/1'],
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [101, 102, 103, 104, 105],
... 'ID': ['.', '.', '.', '.', '.'],
... 'REF': ['T', 'G', 'T', 'A', 'C'],
... 'ALT': ['C', 'C', 'G,A', 'C', 'T'],
... 'QUAL': ['.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT', 'GT'],
... 'F': ['0/0', '0/1', '0/1', '1/1', '0/0'],
... 'G': ['0/0', '0/1', '0/1', '1/1', './.'],
... 'H': ['0/0', '0/1', '0/1', '1/1', '1/1'],
... 'I': ['0/0', '0/1', '0/0', '1/1', '1/1'],
... 'J': ['0/0', '0/1', '1/2', '1/1', '0/1'],
... }
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> pyvcf.plot_af_correlation(vf1, vf2)
>>> plt.tight_layout()
"""
def one_gt(g):
alleles = g.split(':')[0].split('/')
alleles = [x for x in alleles if x != '0']
return len(alleles)
def one_row(r):
locus = f'{r.CHROM}-{r.POS}-{r.REF}-{r.ALT}'
ac = r[9:].apply(one_gt).sum()
if 'X' in r.CHROM or 'Y' in r.CHROM:
total = len(r[9:])
else:
total = len(r[9:]) * 2
af = ac / total
return pd.Series([locus, af])
s1 = vf1.filter_multialt().filter_empty(threshold=1).df.apply(one_row, axis=1)
s2 = vf2.filter_multialt().filter_empty(threshold=1).df.apply(one_row, axis=1)
s1.columns = ['Locus', 'First']
s2.columns = ['Locus', 'Second']
s1 = s1.set_index('Locus')
s2 = s2.set_index('Locus')
df = pd.concat([s1, s2], axis=1).dropna()
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.scatterplot(data=df, x='First', y='Second', ax=ax)
return ax | aadf3b7cd226e04c0bdbf26c737831b515d7e6c9 | 4,715 |
def significant_pc_test(adata, p_cutoff=0.1, update=True, obsm='X_pca', downsample=50000):
"""
Parameters
----------
adata
p_cutoff
update
obsm
downsample
Returns
-------
"""
pcs = adata.obsm[obsm]
if pcs.shape[0] > downsample:
print(f'Downsample PC matrix to {downsample} cells to calculate significant PC components')
use_pcs = pd.DataFrame(pcs).sample(downsample).values
else:
use_pcs = pcs
i = 0
for i in range(use_pcs.shape[1] - 1):
cur_pc = use_pcs[:, i]
next_pc = use_pcs[:, i + 1]
p = ks_2samp(cur_pc, next_pc).pvalue
if p > p_cutoff:
break
n_components = min(i + 1, use_pcs.shape[1])
print(f'{n_components} components passed P cutoff of {p_cutoff}.')
if update:
adata.obsm[obsm] = pcs[:, :n_components]
print(f"Changing adata.obsm['X_pca'] from shape {pcs.shape} to {adata.obsm[obsm].shape}")
return n_components | c8e367c53330bcb959fb7baba9649d090de91389 | 4,716 |
from os import path
from tqdm import tqdm
def files_from_output(folder):
"""Get list of result files from output log."""
files = []
with open(path.join(folder, "OUTPUT.out")) as out_file:
for line in tqdm(out_file.readlines(), desc="Read files from output"):
if line.find("+ -o") != -1:
files.append(line.replace(
"+ -o\t", "").replace("results/", "").strip())
elif line.find("+++ TASK ->") != -1 and line.find("output=") != -1:
chunks = line.split("\t")
for chunk in chunks:
if chunk.find("output=") != -1:
files.append(chunk.replace("output=", "").strip())
return files | a76db67ef6484773f216163b8f27e1741856892d | 4,717 |
def unique_hurricanes(hurdat):
"""
Returns header info for each unique hurricanes in HURDAT2-formatted text
file hurdat.
"""
#split on returns if hurdat is not a list
if not isinstance(hurdat, list):
hurdat = hurdat.split('\n')
header_rows = [parse_header(
line, line_num
) for line_num, line in enumerate(hurdat) if parse_header(
line, line_num
)]
keys = [list(h.keys())[0] for h in header_rows]
values = [list(h.values())[0] for h in header_rows]
return {k: v for k, v in zip(keys, values)} | c87561b80f6c8b70c33d64834c4d289508a2c120 | 4,718 |
import os
def find_package_data():
"""
Find package_data.
"""
theme_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'themes')):
slice_len = len('jupyterlab' + os.sep)
theme_dirs.append(pjoin(dir[slice_len:], '*'))
schema_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'schemas')):
slice_len = len('jupyterlab' + os.sep)
schema_dirs.append(pjoin(dir[slice_len:], '*'))
return {
'jupyterlab': ['build/*', '*.js', 'package.app.json',
'yarn.lock', 'yarn.app.lock', '.yarnrc'
] + theme_dirs + schema_dirs
} | b0becf06f363723723d99ca58819cd1311a918ef | 4,719 |
def delete_models_shares_groups(id, group_id, client=None):
"""Revoke the permissions a group has on this object
Use this function on both training and scoring jobs.
Parameters
----------
id : integer
The ID of the resource that is shared.
group_id : integer
The ID of the group.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
Returns
-------
None
Response code 204: success
"""
return _unshare_model(id, group_id, entity_type='groups', client=client) | 59f3391e6e92fe0bf2f4c204a9da7c55a8ac8c6c | 4,720 |
def step1ddiffusionanalytical(q, dt, alpha, beta, prng=np.random, **kwargs):
"""Analytical time stepping as proposed in Jenkins, Spano arXiv:1506.06998
Uses the asymptotic normality of the death process for small times
(see Griffiths, J. Math. Bio, 1984)
"""
theta = alpha+beta
beta_ = 0.5*(theta-1.0)*dt
if beta_ == 0.0:
eta = 1.0
sigma = (2.0/(3.0*dt))**.5
else:
eta = beta_/np.expm1(beta_)
# calculation can sometimes give negative numbers due to numerical precision
factor = max(0, 2.0*eta/dt *(1.0 + eta/(eta+beta_)-2.0*eta))
sigma = max((eta+beta_) * factor**.5 / beta_, 1e-16)
mu = 2.0*eta/dt
m = max(int(round(prng.normal(mu, sigma))), 0)
l = prng.binomial(m, q)
qnew = prng.beta(alpha+l, beta+m-l)
return qnew | ae1034488250a7a0afc184878496cd656b239016 | 4,721 |
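A short driver sketch (assumes `step1ddiffusionanalytical` above is importable; the parameter values are arbitrary) that iterates the one-step sampler to trace an allele-frequency path:

```python
import numpy as np

rng = np.random.RandomState(0)   # any object exposing .normal/.binomial/.beta works as prng
q, dt, alpha, beta = 0.3, 0.01, 1.0, 2.0
path = [q]
for _ in range(100):
    q = step1ddiffusionanalytical(q, dt, alpha, beta, prng=rng)
    path.append(q)
print("frequency after 100 steps:", round(path[-1], 3))
```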
def no_vtk():
""" Checks if VTK is installed and the python wrapper is functional """
global _vtk_version
return _vtk_version is None | 654dfd0f10a36bbfd3e46c5a93f84a9234e8c0ca | 4,722 |
def get_request_list(flow_list: list) -> list:
"""
Convert a flow list into a request list. In mitmproxy, "flow" is the umbrella term for a request/response pair; this function extracts only the requests.
:param flow_list: list of flows
:return: list of requests
"""
req_list = []
for flow in flow_list:
request = flow.get("request")
req_list.append(request)
return req_list | a70e0120ef2be88bd0644b82317a2a0748352c6c | 4,723 |
from typing import Optional, Tuple
import logging
def query_total_production(start_date, end_date) -> Optional[Tuple[int, int]]:
"""Counts of production scans and storage scans over the given time interval; returns None on error"""
semi_count = None
fg_count = None
try:
with stSession() as s:
semi_count = (
s.query(ProductionScan)
.filter(
sa.and_(
ProductionScan.date >= start_date,
ProductionScan.date <= end_date,
)
)
.count()
)
fg_count = (
s.query(StorageScan)
.filter(
sa.and_(
StorageScan.date >= start_date,
StorageScan.date <= end_date,
)
)
.count()
)
except sa.exc.OperationalError as e:
logging.error(f"Operational error occured\n{e}")
return None
except Exception as e:
logging.error("Unknown Error", exc_info=True)
return None
finally:
s.close()
return (semi_count, fg_count) | 4ecf7b2e70feaa75456550deca6a5b8a326adc11 | 4,724 |
import pytz
def add_fields(_, level, event_dict):
""" Add custom fields to each record. """
now = dt.datetime.now()
event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['level'] = level
if session:
event_dict['session_id'] = session.get('session_id')
if request:
try:
event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
except Exception:
event_dict['ip_address'] = 'unknown'
return event_dict | 3efbffc2808a048fde80a3655e28417c39f2ad04 | 4,725 |
def Smith_set(A,P,params,election_ID,printing_wanted=False):
"""
Compute and return a list of the candidates in the Smith set.
This is the smallest set of candidates such that every candidate in the
Smith set beats every candidate not in the Smith set in one-on-one contests.
In this implementation, "a beats b" if at least half the voters prefer a to b.
Thus, a beats b and vice versa if they are tied; this gives probably the most
reasonable notion for a Smith set when there are ties.
The algorithm uses the fact that the Smith set will be the *last*
strongly connected component discovered by the usual DFS SCC algorithm.
Here A = set of alternatives (candidates), and
P = profile (dict mapping ballots to counts).
"""
if printing_wanted:
print "%s: Computing Smith set."%election_ID
pref = pairwise_prefs(A,P,params) # pref[(i,j)] gives number preferring i to j
n = number_of_ballots_in_profile(P)
stack = []
in_stack = set()
index = 0 # DFS node counter
I = { } # gives indices of vertices
L = { } # gives lowlinks of vertices
for a in A:
if a not in I: # Start a DFS at each node we haven't seen yet
(index,scc)=Smith_aux(a,A,index,I,L,stack,in_stack,pref,n)
scc = sorted(scc)
if printing_wanted:
print(indent + "Smith set is: " + " ".join(scc))
return scc | eb71ee5ae402d732a3bea804aad5b39fe3bd92a2 | 4,726 |
from typing import Callable
from typing import Coroutine
from typing import Any
def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args) -> T_Retval:
"""
Call a coroutine function from a worker thread.
:param func: a coroutine function
:param args: positional arguments for the callable
:return: the return value of the coroutine function
"""
try:
asynclib = _local.current_async_module
except AttributeError:
raise RuntimeError('This function can only be run from an AnyIO worker thread')
return asynclib.run_async_from_thread(func, *args) | 829a9008e8aa058b66cb637db71f8f8eb8499374 | 4,727 |
def check_tensor_shape(tensor_tf, target_shape):
""" Return a Tensorflow boolean graph that indicates whether
tensor_tf has the specified target shape. Only check
not None entries of target_shape.
:param tensor_tf: Tensor to check shape for.
:param target_shape: Target shape to compare tensor to.
:returns: True if shape is valid, False otherwise (as TF boolean).
"""
result = tf.constant(True)
for i, target_length in enumerate(target_shape):
if target_length:
result = tf.logical_and(
result,
tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
return result | 8b9938c67f2e3655f9ff4dac08261fb6e5803af2 | 4,728 |
def LabelAddressPlus(ea, name, force=False, append_once=False, unnamed=False, nousername=False, named=False, throw=False):
"""
Label an address with name (forced) or with a uniquified alternative (e.g. name_0, name_1, ...)
:param ea: address
:param name: desired name
:param force: force name (displace existing name)
:param append_once: append `name` if not already ending with `name`
:param named: [str, callable(addr, name)] name for things with existing usernames
:return: success as bool
"""
def ThrowOnFailure(result):
if not result and throw:
raise RuntimeError("Couldn't label address {:x} with \"{}\"".format(ea, name))
return result
def MakeUniqueLabel(name, ea=idc.BADADDR):
fnLoc = idc.get_name_ea_simple(name)
if fnLoc == idc.BADADDR or fnLoc == ea:
return name
fmt = "%s_%%i" % name
for i in range(100000):
tmpName = fmt % i
fnLoc = idc.get_name_ea_simple(tmpName)
if fnLoc == idc.BADADDR or fnLoc == ea:
return tmpName
return ""
if nousername:
unnamed = nousername
if ea < idc.BADADDR:
if HasUserName(ea):
if named:
if callable(named):
_name = idc.get_name(ea)
_name = named(ea, _name, name)
else:
name = named
elif unnamed:
return
fnName = idc.get_name(ea)
if append_once:
if not fnName.endswith(name):
name += fnName
else:
return ThrowOnFailure(False)
fnLoc = idc.get_name_ea_simple(name)
if fnLoc == idc.BADADDR:
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
elif fnLoc == ea:
return ThrowOnFailure(True)
else:
if force:
idc.set_name(fnLoc, "", idc.SN_AUTO | idc.SN_NOWARN)
idc.Wait()
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
else:
name = MakeUniqueLabel(name, ea)
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
else:
print("0x0%0x: Couldn't label %s, BADADDR" % (ea, name))
return False | 4772fa25c482eb10abdfea6aa9542f50827c9346 | 4,729 |
def do_match(station1, station2, latitude, elevation, distance):
"""
Perform the match between two stations.
Do initial latitude check to speed up the test
(not longitude as this isn't a constant distance)
Return probabilities for elevation, separation and Jaccard Index
:param Station Class station1:
:param Station Class station2:
:returns:
list of 3 probabilities [elev, dist, jaccard]
"""
# latitude - pre check to make quicker
if np.abs(station1.lat - station2.lat) > LATITUDE_THRESHOLD:
return False
# elevation
height = np.abs(station1.elev - station2.elev)
if height < (ELEVATION_THRESHOLD*4):
height_Pr = np.exp(-1.0 * height / ELEVATION_THRESHOLD)
else:
height_Pr = 0
# latitude & longitude
distance, bearing = utils.get_dist_and_bearing([station1.lat, station1.lon],[station2.lat, station2.lon])
if distance < (DISTANCE_THRESHOLD*4):
dist_Pr = np.exp(-1.0 * distance / DISTANCE_THRESHOLD)
else:
dist_Pr = 0.
# Jaccard Index on name - remove all whitespace
jac_Pr = jaccard(station1.name.strip(), station2.name.strip())
# Jaccard Index on METAR call sign
if station1.call != "" and station2.call != "":
jac_Pr_metar = jaccard(station1.call, station2.call)
# name matching
return [height_Pr, dist_Pr, jac_Pr] | 078d04117363087a512449497713c487bc1180e4 | 4,730 |
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
Args
----
vec1 (numpy.ndarray): A 3d "source" vector
vec2 (numpy.ndarray): A 3d "destination" vector
Returns
-------
numpy.ndarray: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix | 9568378e309c5da6e6dffee4788e07eb0c2ea189 | 4,731 |
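A quick self-contained verification of the Rodrigues-style formula above (NumPy only; it assumes the two vectors are not parallel, since `s` would then be zero):

```python
import numpy as np

vec1, vec2 = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 1.0])
a, b = vec1 / np.linalg.norm(vec1), vec2 / np.linalg.norm(vec2)
v, c = np.cross(a, b), np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
R = np.eye(3) + kmat + kmat @ kmat * ((1 - c) / s ** 2)
assert np.allclose(R @ a, b)   # the rotated source direction lands on the target direction
```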
import os
def get_file_from_cache_if_exists(file_path,
update_modification_time_on_access=True):
"""Get file from nfs cache if available."""
cache_file_path = get_cache_file_path(file_path)
if not cache_file_path or not file_exists_in_cache(cache_file_path):
# If the file does not exist in cache, bail out.
return False
# Fetch cache file size before starting the actual copy.
cache_file_size = get_cache_file_size_from_metadata(cache_file_path)
# Copy file from cache to local.
if not shell.copy_file(cache_file_path, file_path):
return False
# Update timestamp to later help with eviction of old files.
if update_modification_time_on_access:
update_access_and_modification_timestamp(cache_file_path)
# Return success or failure based on existence of local file and size
# comparison.
return (os.path.exists(file_path) and
os.path.getsize(file_path) == cache_file_size) | 98bb16eb964483b2bcb9bcad02463042fc2c18b2 | 4,732 |
def audio(src, type="audio/ogg", other_attr={}):
"""
add audio file
args:
src <str> : source file
type <str> : type of audio file
other_attr <dict> : other attributes
"""
return f"""
<audio {_parse_attr(other_attr)}>
<source src="{src}" type="{type}">
</audio>
""".strip() | 3ccd8aea6d7257c46336bb81184cf4b7f379624e | 4,733 |
def test_triangle(dim):
"""
Tests if dimensions can come from a triangle.
dim is a list or tuple of the three dimensions
"""
dim = [int(x) for x in dim]
dim.sort()
if dim[0] + dim[1] > dim[2]:
return True
else:
return False | fc5bc8f7d3830da0ae8692d7cf65a72bcfe2ba7d | 4,734 |
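Two quick checks (assuming `test_triangle` above is in scope); note the strict inequality rejects degenerate triangles:

```python
assert test_triangle([3, 4, 5]) is True          # 3 + 4 > 5
assert test_triangle(("3", "1", "2")) is False   # 1 + 2 == 3, not strictly greater
```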
from typing import List
def arg_parser(data: str):
"""parse "x[a1, a2, a3], y[k1=a1, a2, k3=a3], z"
nested [] are ignored.
"""
res: List[NameWithAttrs] = _ARG_WITH_ATTR_PARSER.parse(data)
return res | fa530584a96829944562d2c08bdfed34bfa3eec4 | 4,735 |
def _get_resource(span):
"""Get resource name for span"""
if "http.method" in span.attributes:
route = span.attributes.get("http.route")
return (
span.attributes["http.method"] + " " + route
if route
else span.attributes["http.method"]
)
return span.name | 71b4d2e568350ccfb436bbff6e7a2cff1f3cb251 | 4,736 |
def get_draw_title(kdata):
"""根据typ值,返回相应的标题,如 上证指数(日线)
参数:kdata: KData实例
返回:一个包含stock名称的字符串,可用作绘图时的标题
"""
if not kdata:
return ""
query = kdata.getQuery()
stock = kdata.getStock()
if stock.isNull():
return ""
s1 = ''
if query.kType == KQuery.KType.DAY:
s1 = u' (日线)'
elif query.kType == KQuery.KType.WEEK:
s1 = u' (周线)'
elif query.kType == KQuery.KType.MONTH:
s1 = u' (月线)'
elif query.kType == KQuery.KType.QUARTER:
s1 = u' (季线)'
elif query.kType == KQuery.KType.HALFYEAR:
s1 = u' (半年线)'
elif query.kType == KQuery.KType.YEAR:
s1 = u' (年线)'
elif query.kType == KQuery.KType.MIN:
s1 = u' (1分钟线)'
elif query.kType == KQuery.KType.MIN5:
s1 = u' (5分钟线)'
elif query.kType == KQuery.KType.MIN15:
s1 = u' (15分钟线)'
elif query.kType == KQuery.KType.MIN30:
s1 = u' (30分钟线)'
elif query.kType == KQuery.KType.MIN60:
s1 = u' (60分钟线)'
name = stock.name
if stock.code == "":
stitle = "Block(%s) %s" % (stock.id, name) + s1
else:
stitle = stock.market + stock.code + ' ' + name + s1
return stitle | 7c661b63cedb477224d7f5ea9d7c182108f801a5 | 4,737 |
def _B(slot):
"""Convert slot to Byte boundary"""
return slot*2 | 97f13e9fd99989a83e32f635193a0058656df68b | 4,738 |
import torch
def nll(perm, true):
"""
perm: (n, n) or (s, n, n)
true: (n)
"""
n = true.size(-1)
# i = torch.arange(n, device=perm.device)
# j = true.to(perm.device)
# print("perm.nll:", perm.size(), true.size())
elements = perm.cpu()[..., torch.arange(n), true]
# elements = perm.cpu()[torch.arange(n), true]
nll = -torch.sum(torch.log2(elements.to(perm.device)))
if perm.dim() == 3: # normalize by number samples
nll = nll / perm.size(0)
# print("nll", nll)
return nll | a63c95e814529539ecd964f4309ea96f78cfcbb1 | 4,739 |
def _peaks_colors_from_points(points, colors=None, points_per_line=2):
"""
Returns a VTK scalar array containing colors information for each one of
the peaks according to the policy defined by the parameter colors.
Parameters
----------
points : (N, 3) array or ndarray
points coordinates array.
colors : None or string ('rgb_standard') or tuple (3D or 4D) or
array/ndarray (N, 3 or 4) or array/ndarray (K, 3 or 4) or
array/ndarray(N, ) or array/ndarray (K, )
If None a standard orientation colormap is used for every line.
If one tuple of color is used. Then all streamlines will have the same
color.
If an array (N, 3 or 4) is given, where N is equal to the number of
points. Then every point is colored with a different RGB(A) color.
If an array (K, 3 or 4) is given, where K is equal to the number of
lines. Then every line is colored with a different RGB(A) color.
If an array (N, ) is given, where N is the number of points then these
are considered as the values to be used by the colormap.
If an array (K,) is given, where K is the number of lines then these
are considered as the values to be used by the colormap.
points_per_line : int (1 or 2), optional
number of points per peak direction.
Returns
-------
color_array : vtkDataArray
vtk scalar array with name 'colors'.
colors_are_scalars : bool
indicates whether or not the colors are scalars to be interpreted by a
colormap.
global_opacity : float
returns 1 if the colors array doesn't contain opacity otherwise -1.
"""
num_pnts = len(points)
num_lines = num_pnts // points_per_line
colors_are_scalars = False
global_opacity = 1
if colors is None or colors == 'rgb_standard':
# Automatic RGB colors
colors = np.asarray((0, 0, 0))
color_array = numpy_to_vtk_colors(np.tile(255 * colors, (num_pnts, 1)))
elif type(colors) is tuple:
global_opacity = 1 if len(colors) == 3 else -1
colors = np.asarray(colors)
color_array = numpy_to_vtk_colors(np.tile(255 * colors, (num_pnts, 1)))
else:
colors = np.asarray(colors)
if len(colors) == num_lines:
pnts_colors = np.repeat(colors, points_per_line, axis=0)
if colors.ndim == 1: # Scalar per line
color_array = numpy_support.numpy_to_vtk(pnts_colors,
deep=True)
colors_are_scalars = True
elif colors.ndim == 2: # RGB(A) color per line
global_opacity = 1 if colors.shape[1] == 3 else -1
color_array = numpy_to_vtk_colors(255 * pnts_colors)
elif len(colors) == num_pnts:
if colors.ndim == 1: # Scalar per point
color_array = numpy_support.numpy_to_vtk(colors, deep=True)
colors_are_scalars = True
elif colors.ndim == 2: # RGB(A) color per point
global_opacity = 1 if colors.shape[1] == 3 else -1
color_array = numpy_to_vtk_colors(255 * colors)
color_array.SetName('colors')
return color_array, colors_are_scalars, global_opacity | 7abc5be4739164dc225081ec321d1cb591f74bae | 4,740 |
def epi_reg(epi, t1, t1brain, out='epi_reg', **kwargs):
"""Wrapper for the ``epi_reg`` command.
:arg epi: Input EPI image
:arg t1: Input wholehead T1 image
:arg t1brain: Input brain extracted T1 image
:arg out: Output name
"""
asrt.assertIsNifti(epi)
asrt.assertIsNifti(t1)
asrt.assertIsNifti(t1brain)
valmap = {
'nofmapreg' : wutils.SHOW_IF_TRUE,
'noclean' : wutils.SHOW_IF_TRUE,
'v' : wutils.SHOW_IF_TRUE,
}
cmd = ['epi_reg', '--epi='+epi, '--t1='+t1, '--t1brain='+t1brain, '--out='+out]
cmd += wutils.applyArgStyle('--=',
valmap=valmap,
singlechar_args=True,
**kwargs)
return cmd | 1d19f0efcfb4fcfc7293f294978d11811861a06b | 4,741 |
import pathlib
import json
def load_towns():
"""Sample of Wikipedia dataset that contains informations about Toulouse, Paris, Lyon and
Bordeaux.
Examples
--------
>>> from pprint import pprint as print
>>> from cherche import data
>>> towns = data.load_towns()
>>> print(towns[:3])
[{'article': 'Paris (French pronunciation: \u200b[paʁi] (listen)) is the '
'capital and most populous city of France, with an estimated '
'population of 2,175,601 residents as of 2018, in an area of more '
'than 105 square kilometres (41 square miles).',
'id': 0,
'title': 'Paris',
'url': 'https://en.wikipedia.org/wiki/Paris'},
{'article': "Since the 17th century, Paris has been one of Europe's major "
'centres of finance, diplomacy, commerce, fashion, gastronomy, '
'science, and arts.',
'id': 1,
'title': 'Paris',
'url': 'https://en.wikipedia.org/wiki/Paris'},
{'article': 'The City of Paris is the centre and seat of government of the '
'region and province of Île-de-France, or Paris Region, which has '
'an estimated population of 12,174,880, or about 18 percent of '
'the population of France as of 2017.',
'id': 2,
'title': 'Paris',
'url': 'https://en.wikipedia.org/wiki/Paris'}]
"""
with open(pathlib.Path(__file__).parent.joinpath("towns.json"), "r") as towns_json:
return json.load(towns_json) | 72aa393cfc40db5f254059d78679ea5615f494d2 | 4,742 |
def nonce_initialization(params: InitializeNonceParams) -> TransactionInstruction:
"""Generate an instruction to initialize a Nonce account.
Args:
params: The nonce initialization params.
Returns:
The instruction to initialize the nonce account.
"""
return TransactionInstruction.from_solders(ssp.initialize_nonce_account(params.to_solders())) | 99fc70fd7965443b508923013a988f96ecf7b222 | 4,743 |
def to_weeknr(date=''):
"""
Transforms a date strings YYYYMMDD to the corresponding week nr (e.g. 20200713 becomes w29)
"""
week_nr = pd.to_datetime(date).to_pydatetime().isocalendar()[1]
return f"w{week_nr}" | f9699e735be8d92e4340a23464ee54247c355ffd | 4,744 |
def build_logisticregression(X_loc, y_loc, args):
"""finds best parameters for logistic regression"""
Printer(colored('(training) ', 'green') +
'searching for best parameters for logistic regression')
# specify parameters and distributions to sample from
param_dist = {"C": np.logspace(-9, 3, 13),
"solver": ['newton-cg', 'lbfgs', 'liblinear', 'sag'],
"dual": [False],
"tol": np.logspace(-9, 3, 13)
}
clf = LogisticRegression(penalty='l2')
random_search = RandomizedSearchCV(clf, param_distributions=param_dist, scoring='accuracy', n_iter=int(args.iter), n_jobs=-1, refit=True, cv=3)
random_search.fit(X_loc, y_loc)
acc = random_search.cv_results_['mean_test_score']
filename = 'cv/logisticregression_' + str(np.mean(acc)) + '.pkl'
# save model
savemodel(random_search, filename)
# save best params
filename_param = 'cv/logisticregression_param_' + str(np.mean(acc)) + '.json'
saveparams(random_search.best_params_, filename_param)
return random_search | f63f67bc9debd2adccac39910b29ed705498dd4b | 4,745 |
import re
def load_data(experiments,
remove_outlier=True,
peptides=["A5cons",
"A6cons",
"phage_ctl_0",
"phage_ctl_1",
"phage_ctl_2",
"phage_ctl_4",
"phage_ctl_5",
"phage_ctl_6",
"phage_ctl_7",
"phage_ctl_8",
"phage_ctl_9"]):
"""
Convenience function that allows one to load a whole bunch of experiments,
with different peptides, into a single data frame.
experiments should be a list of dictionaries of the following form:
[{"protein":"hA6",
"name_in_file":"hA6_4.3",
"Kd":45,
"prot_conc":4.2,
"probe_conc":4.2,
"data_file":"13_main-collection.txt",
"plate_file":"13_plate-layout.xlsx"},...]
remove_outlier: whether or not to look for outlier points and remove them
when averaging technical reps
peptides: list of peptides. these are used to build regular expressions
to match peptides in each data file. It looks for an exact match
at the start of the string, allowing any trailing characters.
NOTE: this could lead to problems if you had peptides with names
like pep10, pep100.
"""
pep_patterns = [re.compile(f"{p}") for p in peptides]
proteins = set([e["protein"] for e in experiments])
times_pep_was_seen = dict([(protein,dict([(p,0) for p in peptides]))
for protein in proteins])
all_df = []
for expt in experiments:
df, _ = read_file(expt["data_file"],expt["plate_file"])
df = df[df.protein == expt["name_in_file"]]
peptide_Kd_scalar = get_peptide_Kd_scalar(Kd=expt["Kd"],
Mt=expt["prot_conc"],
Xt=expt["probe_conc"])
peps_in_df = np.unique(df.peptide)
for p in peps_in_df:
for pattern in pep_patterns:
if pattern.match(p):
pep_df = df[df.peptide == p]
plates = np.unique(pep_df.plate)
protein = expt["protein"]
peptide = pattern.pattern
for plate in plates:
times_pep_was_seen[protein][peptide] += 1
single_rep = pep_df[pep_df.plate == plate]
fit_df = average_tech_reps(single_rep,remove_outlier=remove_outlier)
fit_df["protein"] = protein
fit_df["peptide"] = peptide
fit_df["rep_number"] = times_pep_was_seen[protein][peptide]
fit_df["Kd_scalar"] = peptide_Kd_scalar
fit_df["plate_file"] = expt["plate_file"]
fit_df["data_file"] = expt["data_file"]
fit_df["name_in_file"] = expt["name_in_file"]
fit_df["plate_number"] = plate
all_df.append(fit_df)
break
return pd.concat(all_df) | b9d7c7be8e0bbe5f5aee785cc0b525d9a57acc8b | 4,746 |
def get_lines(clearance):
"""
Add lines per reference well interval between the closest points on the
reference well and the offset well and color them according to the
calculated Separation Factor (SF) between the two wells at these points.
Parameters
----------
clearance: welleng.clearance object
Returns
-------
lines: vedo.Lines object
A vedo.Lines object colored by the object's SF values.
"""
assert VEDO, "ImportError: try pip install welleng[easy]"
c = clearance.SF
start_points, end_points = clearance.get_lines()
lines = Lines(start_points, end_points).cmap('hot_r', c, on='cells')
lines.addScalarBar(title='SF')
return lines | 2ec0ef039647b9c72219989d00b3e92092a79c16 | 4,747 |
import hashlib
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest() | d615d9ec14b79eac72168db616664f5878ca8e21 | 4,748 |
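A standalone sketch of the same chunked-hashing idea using the built-in `open()` instead of `python_utils.open_file` (the block size here is an arbitrary choice):

```python
import hashlib

def md5_of_file(path, block_size=8192):
    """Stream a file through MD5 so large files never have to sit fully in memory."""
    m = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            m.update(chunk)
    return m.hexdigest()
```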
def status():
"""Return status."""
return jsonify(STATUS) | de396fdf35e42a36ed40b294a26645efba29c27a | 4,749 |
from typing import Optional
def get_entitlement(account_id: Optional[str] = None,
customer_id: Optional[str] = None,
entitlement_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEntitlementResult:
"""
Returns the requested Entitlement resource. Possible error codes: * PERMISSION_DENIED: The customer doesn't belong to the reseller. * INVALID_ARGUMENT: Required request parameters are missing or invalid. * NOT_FOUND: The customer entitlement was not found. Return value: The requested Entitlement resource.
"""
__args__ = dict()
__args__['accountId'] = account_id
__args__['customerId'] = customer_id
__args__['entitlementId'] = entitlement_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:cloudchannel/v1:getEntitlement', __args__, opts=opts, typ=GetEntitlementResult).value
return AwaitableGetEntitlementResult(
association_info=__ret__.association_info,
commitment_settings=__ret__.commitment_settings,
create_time=__ret__.create_time,
name=__ret__.name,
offer=__ret__.offer,
parameters=__ret__.parameters,
provisioned_service=__ret__.provisioned_service,
provisioning_state=__ret__.provisioning_state,
purchase_order_id=__ret__.purchase_order_id,
suspension_reasons=__ret__.suspension_reasons,
trial_settings=__ret__.trial_settings,
update_time=__ret__.update_time) | 8cc10901b90a05a4bc0089758ce297c54af48569 | 4,750 |
def skip_to_home(fxn):
""" Skips past page straight to home page if logged in
"""
@wraps(fxn)
def skipped_page_fxn(*arg, **kwargs):
if session.get('logged_in'):
return redirect(url_for('home'))
else:
return fxn(*arg, **kwargs)
return skipped_page_fxn | 9edbbc186caa93046d17c179610a9c1309f281db | 4,751 |
from pathlib import Path
from typing import List, Optional
def get_all_paths_from_directory(directory: Path, recursive: bool, paths: Optional[List[Path]] = None) -> List[Path]:
"""
Gets a list of file paths for all files in the given directory (and its subdirectories if recursive is true)
:param directory: The starting directory to get file paths from
:param recursive: Whether files in subdirectories should be included
:param paths: The list that file paths will be added to; a fresh list is created when omitted
:return: A list of file paths from the given directory (and subdirectories if recursive is true)
"""
# Avoid the shared mutable default argument: create a new list for each top-level call.
if paths is None:
    paths = []
directories = []
for file in directory.iterdir():
# If the file is a subdirectory and we are processing subdirectories, add it to the list for later processing
if file.is_dir():
if recursive:
directories.append(file)
else: # If the file is just a normal file then add it to the paths list
paths.append(file)
# If we are processing subdirectories then go through all the subdirectories and process them
if recursive:
for file in directories:
get_all_paths_from_directory(file, recursive, paths)
return paths | 95f26d94ff1656fa5e4c656ecf3e424bf29f21b0 | 4,752 |
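For comparison, the recursive case can also be written with pathlib's `rglob`; a sketch of the equivalent helper (files only, directories skipped):

```python
from pathlib import Path
from typing import List

def all_file_paths(directory: Path) -> List[Path]:
    # rglob('*') walks subdirectories; filtering with is_file() drops the directories themselves.
    return [p for p in directory.rglob('*') if p.is_file()]
```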
def check_contigs_for_dupes(matches):
"""check for contigs that match more than 1 UCE locus"""
node_dupes = defaultdict(list)
for node in matches:
node_dupes[node] = len(set(matches[node]))
dupe_set = set([node for node in node_dupes if node_dupes[node] > 1])
return dupe_set | f20ab684388e38b51e193567b14a2a610d87f227 | 4,753 |
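A tiny illustration (assuming `check_contigs_for_dupes` above is importable; `matches` maps contig names to the UCE loci they hit):

```python
matches = {"contig1": ["uce-1", "uce-2"], "contig2": ["uce-3", "uce-3"]}
print(check_contigs_for_dupes(matches))  # {'contig1'} -- contig2 hits only one distinct locus
```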
def substitute(P, x0, x1, V=0):
"""
Substitute a variable in a polynomial array.
Args:
P (Poly) : Input data.
x0 (Poly, int) : The variable to substitute. Indicated with either unit
variable, e.g. `x`, `y`, `z`, etc. or through an integer
matching the unit variables dimension, e.g. `x==0`, `y==1`,
`z==2`, etc.
x1 (Poly) : Simple polynomial to substitute `x0` in `P`. If `x1` is an
polynomial array, an error will be raised.
Returns:
(Poly) : The resulting polynomial (array) where `x0` is replaced with
`x1`.
Examples:
>>> x,y = cp.variable(2)
>>> P = cp.Poly([y*y-1, y*x])
>>> print(cp.substitute(P, y, x+1))
[q0^2+2q0, q0^2+q0]
With multiple substitutions:
>>> print(cp.substitute(P, [x,y], [y,x]))
[q0^2-1, q0q1]
"""
x0,x1 = map(Poly, [x0,x1])
dim = np.max([p.dim for p in [P,x0,x1]])
dtype = chaospy.poly.typing.dtyping(P.dtype, x0.dtype, x1.dtype)
P, x0, x1 = [chaospy.poly.dimension.setdim(p, dim) for p in [P,x0,x1]]
if x0.shape:
x0 = [x for x in x0]
else:
x0 = [x0]
if x1.shape:
x1 = [x for x in x1]
else:
x1 = [x1]
# Check if substitution is needed.
valid = False
C = [x.keys[0].index(1) for x in x0]
for key in P.keys:
if np.any([key[c] for c in C]):
valid = True
break
if not valid:
return P
dims = [tuple(np.array(x.keys[0])!=0).index(True) for x in x0]
dec = is_decomposed(P)
if not dec:
P = decompose(P)
P = chaospy.poly.dimension.dimsplit(P)
shape = P.shape
P = [p for p in chaospy.poly.shaping.flatten(P)]
for i in range(len(P)):
for j in range(len(dims)):
if P[i].keys and P[i].keys[0][dims[j]]:
P[i] = x1[j].__pow__(P[i].keys[0][dims[j]])
break
P = Poly(P, dim, None, dtype)
P = chaospy.poly.shaping.reshape(P, shape)
P = chaospy.poly.collection.prod(P, 0)
if not dec:
P = chaospy.poly.collection.sum(P, 0)
return P | dd176877f8663e7efb3ae99babf29726dbda025b | 4,754 |
def munkres(costs):
"""
Entry method to solve the assignment problem.
costs: list of non-infinite values entries of the cost matrix
[(i,j,value)...]
"""
solver = Munkres(costs)
return solver.munkres() | 583dfc977c8f97fd5a3c4c82e21ae6626f4a763b | 4,755 |
import torch
def compute_mean_std(dataset):
"""
https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation
"""
# global_mean = np.zeros((3 * 64), dtype=np.float64)
# global_var = np.zeros((3 * 64), dtype=np.float64)
n_items = 0
s = RunningStatistics()
for image_fname in dataset:
dct_file = np.load(fs.change_extension(image_fname, ".npz"))
y = torch.from_numpy(dct_file["dct_y"])
cb = torch.from_numpy(dct_file["dct_cb"])
cr = torch.from_numpy(dct_file["dct_cr"])
dct = torch.stack([y, cb, cr], dim=0).unsqueeze(0).float()
dct = sd2(dct)[0]
s.update(dct)
# dct = to_numpy()
# global_mean += dct.mean(axis=(1, 2))
# global_var += dct.std(axis=(1, 2)) ** 2
# n_items += 1
return s.mean, s.std | 83f10fc58e83b41a542fbd088895304b0d0521b5 | 4,756 |
def test_clean_connections_p0(monkeypatch):
"""Add a connection, fake a closed thread and make sure it is removed."""
db_disconnect_all()
class mock_connection():
def __init__(self) -> None: self.value = _MOCK_VALUE_1
def close(self): self.value = None
def mock_connect(*args, **kwargs): return mock_connection()
monkeypatch.setattr(database, 'connect', mock_connect)
db_connect(_MOCK_DBNAME, _MOCK_CONFIG)
monkeypatch.setitem(database._connections, _MOCK_CONFIG['host'], {_MOCK_DBNAME: {1234: None}})
_clean_connections()
assert database._connections[_MOCK_CONFIG['host']][_MOCK_DBNAME].get(1234, None) is None | 9c8c7155566170a3598edcb8a9d7441630545522 | 4,757 |
def add(request):
"""
Add contact information.
**Templates:**
* ``rolodex/add.html``
**Template Variables:**
* form
* results: the list of similar names to allow user to check for dupes
* name: the new name that is submitted
"""
results = []
name = None
if request.method == 'POST':
form = NameForm(request.POST)
if form.is_valid():
request.session['post_data'] = request.POST
# search to see if contact already exists
name = form.cleaned_data['name']
results = Alias.objects.filter(name=name)
if not results:
return HttpResponseRedirect('../add-proceed/')
else:
form = NameForm()
return render_to_response('rolodex/add.html', {
'form': form,
'results': results,
'name': name},
RequestContext(request, {}),
) | b0fdb73f2362dc0a82d46529727cfb3b0093b8e0 | 4,758 |
def convert_total (letter1,number1, letter2, number2):
"""
Description
-----------
Convert two Excel cell coordinates (column letter and row number) into the corresponding row and column index ranges
Context
----------
is called in wrapp_ProcessUnits and wrapp_SystemData
Parameters
----------
letter1 : String, "A", "B" etc.
number1 : Integer
letter2 : String, "A", "B" etc.
number2 : Integer
Returns
-------
Range : tuple of two ranges, (row index range, column index range).
"""
Range = range (convert_numbers(number1), convert_numbers(number2)+1), range(convert_letters(letter1)-1, convert_letters(letter2))
return(Range) | 51cf6480d92fa1d23841dd5605d024548837df5c | 4,759 |
def scale_facet_list(facet_list, scale):
"""
Scale list of facets by the given scaling factor
"""
new_facet_list = []
for facet in facet_list:
new_facet_list.append(scale_facet(facet, scale))
return new_facet_list | 1b1d34803db191b94fc082685718c08895e2ba28 | 4,760 |
def move_lines_to_index(uwline_index_to, lineno, uwlines, lines):
"""Method moves all lines in the list to the proper index of uwlines and
update lineno on these lines. This is useful when you want to change the
order of code lines. But note: it is not updating lineno on other lines
@:returns positions (indexes) from original source where
lines are taken from
"""
# saving positions of imports here, that will be used for restoring 'lineno'
lineno_where_line_was_taken_from = list()
for line in lines:
lineno_where_line_was_taken_from.append(line.lineno)
for token in line.tokens:
# here we will restore correct lineno for moved lines
token.node.lineno = lineno
# hack to remove newlines between imports that we moved to top
pytree_utils.SetNodeAnnotation(token.node,
pytree_utils.Annotation.NEWLINES, 0)
lineno += get_lineno_delta(token)
# need to update lineno on import lines to have consistency
lineno += 1
# filtering moved values and removing them from uwlines
uwlines[:] = [line for line in uwlines if line not in lines]
uwlines[uwline_index_to:uwline_index_to] = lines
return lineno_where_line_was_taken_from | e96f3b9da77468a31275e6255cd08ffa9309fc60 | 4,761 |
def birch(V, E0, B0, BP, V0):
"""
From "Intermetallic Compounds: Principles and Practice", Vol. I: Principles,
Chapter 9, pages 195-210, by M. Mehl, B. Klein, D. Papaconstantopoulos.
Case where n=0.
"""
E = (E0
+ 9.0/8.0*B0*V0*((V0/V)**(2.0/3.0) - 1.0)**2
+ 9.0/16.0*B0*V0*(BP-4.)*((V0/V)**(2.0/3.0) - 1.0)**3)
return E | 6515e2b0b78dfcdc1d7743f3d5a7010fce920aea | 4,762 |
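Written out, the energy expression the function implements (the n = 0 Birch form, with `BP` playing the role of B0'):

```latex
E(V) = E_0
     + \tfrac{9}{8}\, B_0 V_0 \left[\left(\tfrac{V_0}{V}\right)^{2/3} - 1\right]^{2}
     + \tfrac{9}{16}\, B_0 V_0 \,(B_0' - 4) \left[\left(\tfrac{V_0}{V}\right)^{2/3} - 1\right]^{3}
```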
from typing import Set
from typing import Tuple
def debloat(edges: set, nodes: int, threshold: tuple = (0.95, 0.95)) -> Set[Tuple[str, str]]:
"""Remove nodes with inflow and/or ourflow > threshold"""
df = pd.DataFrame(list(edges), columns=["source", "target"])
checkpoint_shape = df.shape[0]
df_inflow = df.groupby("target").count().reset_index().rename(columns={"source": "inflow"})
df_outflow = df.groupby("source").count().reset_index().rename(columns={"target": "outflow"})
df = df.merge(df_inflow, on="target", how="left")
df = df.merge(df_outflow, on="source", how="left")
df["inflow_ratio"] = df["inflow"] / nodes
df["outflow_ratio"] = df["outflow"] / nodes
df = df[(df["inflow_ratio"] <= threshold[0]) & (df["outflow_ratio"] <= threshold[1])]
print(f"{checkpoint_shape - df.shape[0]} edges removed")
df.drop(["outflow", "inflow", "outflow_ratio", "inflow_ratio"], axis=1, inplace=True)
return set(tuple(i) for i in df.values.tolist()) | 5be2dec388086b10409a3de008f357540019c5cf | 4,763 |
def result(jid):
""" Displays a job result.
Args:
jid (str): The job id.
"""
job = q.fetch_job(jid)
statuses = {
'queued': 202,
'started': 202,
'finished': 200,
'failed': 500,
'job not found': 404,
}
if job:
job_status = job.get_status()
result = job.result
else:
job_status = 'job not found'
result = None
resp = {
'status': statuses[job_status],
'job_id': jid,
'job_status': job_status,
'result': result}
return jsonify(**resp) | 2919be693949dd4e873834530565fd28aefcf5d5 | 4,764 |
from typing import Callable
def fd_nabla_1(
x: np.ndarray,
fun: Callable,
delta_vec: np.ndarray,
) -> np.ndarray:
"""Calculate FD approximation to 1st order derivative (Jacobian/gradient).
Parameters
----------
x: Parameter vector, shape (n_par,).
fun: Function returning function values. Scalar- or vector-valued.
delta_vec: Step size vector, shape (n_par,).
Returns
-------
nabla_1:
The FD approximation to the 1st order derivatives.
Shape (n_par, ...) with ndim > 1 if `f_fval` is not scalar-valued.
"""
# parameter dimension
n_par = len(x)
nabla_1 = []
for ix in range(n_par):
delta_val = delta_vec[ix]
delta = delta_val * unit_vec(dim=n_par, ix=ix)
fp = fun(x + delta / 2)
fm = fun(x - delta / 2)
nabla_1.append((fp - fm) / delta_val)
return np.array(nabla_1) | 32363e04bbd22627c7e5c21e02b48154dbfc030a | 4,765 |
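A standalone sanity check of the same central-difference scheme on a function with a known gradient (it does not rely on the `unit_vec` helper assumed above):

```python
import numpy as np

def f(x):
    return x[0] ** 2 + 3.0 * x[1]          # analytic gradient: (2*x0, 3)

x = np.array([1.5, -2.0])
delta = 1e-6
grad = np.array([
    (f(x + delta / 2 * np.eye(2)[i]) - f(x - delta / 2 * np.eye(2)[i])) / delta
    for i in range(2)
])
assert np.allclose(grad, [2 * x[0], 3.0], atol=1e-6)
```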
def get_ref_len_from_bam(bam_path, target_contig):
"""
Fetch the length of a given reference sequence from a :py:class:`pysam.AlignmentFile`.
Parameters
----------
bam_path : str
Path to the BAM alignment
target_contig : str
The name of the contig for which to recover haplotypes.
Returns
-------
end_pos : int
The 1-indexed genomic position at which to stop considering variants.
"""
bam = pysam.AlignmentFile(bam_path)
end = bam.lengths[bam.get_tid(target_contig)]
bam.close()
return end | e80cb3c50f4408b2a614621ff3d688852931e75b | 4,766 |
import pandas as pd
def vstd(df, n=10):
    """
    Volume standard deviation over n periods, vstd(10):
    VSTD = STD(Volume, N) = [∑(Volume - MA(Volume, N))^2 / N]^0.5
    """
_vstd = pd.DataFrame()
_vstd['date'] = df.date
_vstd['vstd'] = df.volume.rolling(n).std(ddof=1)
return _vstd | 97b448d00bcbe89d17339f9ed1155786d9ccd0ab | 4,767 |
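# A small usage sketch; the input frame is assumed to carry `date` and `volume`
# columns, matching what `vstd` reads.
df = pd.DataFrame({
    "date": pd.date_range("2024-01-01", periods=12),
    "volume": [100, 120, 90, 150, 130, 80, 110, 95, 140, 160, 105, 125],
})
print(vstd(df, n=10).tail())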
# `epq` refers to the NIST DTSA-II EPQLibrary, available when this script runs
# under DTSA-II's Jython interpreter.
def createMonatomicGas(elm, pascal):
"""createMonatomicGas(elm, pascal)
Create a gas of single atoms of the specified element at the specified pressure in Pascal and 300 K"""
return epq.Gas((elm,), (1,), pascal, 300.0, elm.toString() + " gas at %f Pa" % pascal) | 4552f551c27e0f10dea72c96bc32b9927649f749 | 4,768 |
import numpy as np
import torch
def boxes_to_central_line_torch(boxes):
"""See boxes_to_central_line
Args:
boxes (tensor[..., 7]): (x, y, z, l, w, h, theta) of each box
Returns:
boxes_lp (tensor[..., 3]): (a, b, c) line parameters of each box
"""
# in case length is shorter than width
bmask = boxes[..., 3] < boxes[..., 4]
theta = -boxes[..., 6] # not sure why minus is needed
theta[bmask] -= 0.5 * np.pi
a = torch.tan(theta)
b = -torch.ones_like(a)
c = -a * boxes[..., 0] - b * boxes[..., 1]
boxes_lp = torch.stack((a, b, c), dim=-1)
boxes_lp /= torch.linalg.norm(boxes_lp, dim=-1, keepdim=True)
return boxes_lp | e96667177cee058fe5f5cd1e8446df97d976474e | 4,769 |
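# A quick sanity check: the box centre (x, y) must satisfy a*x + b*y + c = 0 for
# the returned line parameters. The box values are arbitrary.
box = torch.tensor([[2.0, 3.0, 0.0, 4.0, 1.5, 1.0, 0.3]])   # (x, y, z, l, w, h, theta)
a, b, c = boxes_to_central_line_torch(box)[0]
print(torch.abs(a * box[0, 0] + b * box[0, 1] + c))          # ~0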
def load_as_spark(url: str) -> "PySparkDataFrame": # noqa: F821
"""
Load the shared table using the give url as a Spark DataFrame. `PySpark` must be installed, and
the application must be a PySpark application with the Apache Spark Connector for Delta Sharing
installed.
:param url: a url under the format "<profile>#<share>.<schema>.<table>"
:return: A Spark DataFrame representing the shared table.
"""
    try:
        from pyspark.sql import SparkSession
    except ImportError:
        raise ImportError("Unable to import pyspark. `load_as_spark` requires PySpark.")
spark = SparkSession.getActiveSession()
assert spark is not None, (
"No active SparkSession was found. "
"`load_as_spark` requires running in a PySpark application."
)
return spark.read.format("deltaSharing").load(url) | d427f71530b982703853146cbaa1ce3585b8f195 | 4,770 |
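# A hedged usage sketch; the profile path and table coordinates are placeholders
# and a SparkSession with the Delta Sharing connector must already be active.
shared_df = load_as_spark("/path/to/profile.share#my_share.my_schema.my_table")
shared_df.show(5)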
import numpy as np
def calClassSpecificProbPanel(param, expVars, altAvMat, altChosen, obsAv):
"""
Function that calculates the class specific probabilities for each decision-maker in the
dataset
Parameters
----------
param : 1D numpy array of size nExpVars.
Contains parameter values.
expVars : 2D numpy array of size (nExpVars x (nRows)).
Contains explanatory variables.
altAvMat : sparse matrix of size (nRows x nObs).
The (i, j)th element equals 1 if the alternative corresponding to the ith
column in expVars is available to the decision-maker corresponding to the
jth observation, and 0 otherwise.
altChosen : sparse matrix of size (nRows x nObs).
The (i, j)th element equals 1 if the alternative corresponding to the ith
column in expVars was chosen by the decision-maker corresponding to the
jth observation, and 0 otherwise.
obsAv : sparse matrix of size (nObs x nInds).
The (i, j)th element equals 1 if the ith observation in the dataset corresponds
to the jth decision-maker, and 0 otherwise.
Returns
-------
np.exp(lPInd) : 2D numpy array of size 1 x nInds.
Identifies the class specific probabilities for each individual in the
dataset.
"""
v = np.dot(param[None, :], expVars) # v is 1 x nRows
ev = np.exp(v) # ev is 1 x nRows
ev[np.isinf(ev)] = 1e+20 # As precaution when exp(v) is too large for machine
ev[ev < 1e-200] = 1e-200 # As precaution when exp(v) is too close to zero
nev = ev * altAvMat # nev is 1 x nObs
nnev = altAvMat * np.transpose(nev) # nnev is nRows x 1
p = np.divide(ev, np.transpose(nnev)) # p is 1 x nRows
p[np.isinf(p)] = 1e-200 # When none of the alternatives are available
pObs = p * altChosen # pObs is 1 x nObs
lPObs = np.log(pObs) # lPObs is 1 x nObs
lPInd = lPObs * obsAv # lPInd is 1 x nInds
return np.exp(lPInd) # prob is 1 x nInds | ccb867b44db9f0d7f9b35c92ef66a96097b4b881 | 4,771 |
def build_expression_tree(tokens):
"""Returns an ExpressionTree based upon by a tokenized expression."""
s = [] # we use Python list as stack
for t in tokens:
if t in '+-x*/': # t is an operator symbol
s.append(t) # push the operator symbol on the stack
elif t not in '()': # consider t to be a literal
s.append(ExpressionTree(t)) # push trivial tree storing value
elif t == ')' : # compose a new tree from three constituent parts
right = s.pop() # right subtree as per LIFO
op = s.pop() # operator symbol
left = s.pop() # left subtree
s.append(ExpressionTree(op, left, right)) # reconstruct tree and push it back on the stack
# ignore the parenthesis
return s.pop() # the last reconstructed tree | b54ce3c3d784ff80f380774135c7353d6ebd1078 | 4,772 |
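# A runnable sketch with a minimal stand-in for the textbook ExpressionTree
# class (the original expects the full class from Goodrich et al.'s Data
# Structures book); only the constructor shape matters here.
class ExpressionTree:
    def __init__(self, token, left=None, right=None):
        self.token, self.left, self.right = token, left, right
    def __repr__(self):
        if self.left is None and self.right is None:
            return str(self.token)
        return "({}{}{})".format(self.left, self.token, self.right)

print(build_expression_tree(list("((3+1)x4)")))   # ((3+1)x4)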
import json
def unpack_blockchain(s: str) -> block.Blockchain:
    """Unpack blockchain from JSON string with b64 for bytes."""
    # `block` and `_unpack_block` are provided by the surrounding module.
    blocks = json.loads(s)
    return [_unpack_block(b) for b in blocks]
import re
def parse(options,full_path):
"""
Parse the data according to several regexes
"""
global p_entering_vip_block, p_exiting_vip_block, p_vip_next, p_vip_number, p_vip_set
in_vip_block = False
vip_list = []
vip_elem = {}
order_keys = []
    if options.input_file is not None:
with open(options.input_file, mode=fd_read_options) as fd_input:
for line in fd_input:
line = line.strip()
# We match a vip block
if p_entering_vip_block.search(line):
in_vip_block = True
# We are in a vip block
if in_vip_block:
if p_vip_number.search(line):
vip_number = p_vip_number.search(line).group('vip_number')
vip_number = re.sub('["]', '', vip_number)
vip_elem['id'] = vip_number
if not('id' in order_keys):
order_keys.append('id')
# We match a setting
if p_vip_set.search(line):
vip_key = p_vip_set.search(line).group('vip_key')
if not(vip_key in order_keys):
order_keys.append(vip_key)
vip_value = p_vip_set.search(line).group('vip_value').strip()
vip_value = re.sub('["]', '', vip_value)
vip_elem[vip_key] = vip_value
# We are done with the current vip id
if p_vip_next.search(line):
vip_list.append(vip_elem)
vip_elem = {}
# We are exiting the vip block
if p_exiting_vip_block.search(line):
in_vip_block = False
return (vip_list, order_keys)
else:
# for files in os.listdir(os.path.abspath(options.input_folder)):
with open(full_path, mode=fd_read_options) as fd_input:
for line in fd_input:
line = line.strip()
# We match a vip block
if p_entering_vip_block.search(line):
in_vip_block = True
# We are in a vip block
if in_vip_block:
if p_vip_number.search(line):
vip_number = p_vip_number.search(line).group('vip_number')
vip_number = re.sub('["]', '', vip_number)
vip_elem['id'] = vip_number
if not('id' in order_keys):
order_keys.append('id')
# We match a setting
if p_vip_set.search(line):
vip_key = p_vip_set.search(line).group('vip_key')
if not(vip_key in order_keys):
order_keys.append(vip_key)
vip_value = p_vip_set.search(line).group('vip_value').strip()
vip_value = re.sub('["]', '', vip_value)
vip_elem[vip_key] = vip_value
# We are done with the current vip id
if p_vip_next.search(line):
vip_list.append(vip_elem)
vip_elem = {}
# We are exiting the vip block
if p_exiting_vip_block.search(line):
in_vip_block = False
return (vip_list, order_keys) | 08177b0ab18c77154053249c2308c4705d1dbb65 | 4,774 |
def update_wishlist_games(cur, table, wishlist_args, update_delay):
"""A function to update wishlist games.
:param cur: database cursor object
:type cur: Cursor
:param table: name of table to work on
:type table: str
:param wishlist_args: list of wishlist games to add to database
:type wishlist_args: list
:param update_delay: the amount of time that must pass before updating
:type update_delay: timedelta
"""
# Figure out which games need updating
outdated_games = DB_Calls.wishlist_needs_updating(cur, table, update_delay)
# Fetch deals for new and existing wishlist games
if(wishlist_args or outdated_games):
if(table == DB_Tables.PC_WISHLIST.value):
_table = DB_Tables.PC_WISHLIST.value
games_to_update, new_games = (
PC.get_wishlist_deals(cur, outdated_games+wishlist_args))
elif(table == DB_Tables.PS_WISHLIST.value):
_table = DB_Tables.PS_WISHLIST.value
games_to_update, new_games = (
PS.get_wishlist_deals(cur, outdated_games+wishlist_args))
if(new_games):
DB_Calls.add_games(cur, _table, new_games, games_to_update)
return True
return False | fcd80f19065112893af84d0a9862888a13bde372 | 4,775 |
# `M` is the package's signal model module (the original `from re import M`
# import was spurious); the *Frontend wrappers are defined alongside it.
def WrapSignal(signal):
"""Wrap a model signal with a corresponding frontend wrapper."""
if type(signal) is M.BitsSignal:
return BitsFrontend(signal)
elif type(signal) is M.ListSignal:
return ListFrontend(signal)
elif type(signal) is M.BundleSignal:
return BundleFrontend(signal)
else:
assert False, f'Cannot wrap signal of type {type(signal)}' | 374c47d5053853bc2b23d56d40a2752521a1351f | 4,776 |
import jax.numpy as jnp
import numpy as np
from typing import Any
def is_array_like(element: Any) -> bool:
"""Returns `True` if `element` is a JAX array, a NumPy array, or a Python
`float`/`complex`/`bool`/`int`.
"""
return isinstance(
element, (jnp.ndarray, np.ndarray, float, complex, bool, int)
) or hasattr(element, "__jax_array__") | acb681e329883742009e3e2543158cd602839ae8 | 4,777 |
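# A few quick checks (requires jax to be installed for the jnp import above).
print(is_array_like(np.zeros(3)))   # True
print(is_array_like(3.14))          # True
print(is_array_like("text"))        # False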
def parse(javascript_code):
    """Returns the syntax tree of javascript_code.
    The syntax tree has the same structure as the one produced by esprima.js.
    Same as PyJsParser().parse, provided for your convenience :) """
p = PyJsParser()
return p.parse(javascript_code) | 295a6d5683b975a9229e27d06cc1369e6a6f0a95 | 4,778 |
import tweepy
def twitterAuth():
    """ Authenticate user using Twitter API generated credentials """
    # CONSUMER_KEY/SECRET and ACCESS_KEY/SECRET are module-level constants.
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
    # `wait_on_rate_limit_notify` is a tweepy 3.x argument; it was removed in tweepy 4.0.
    return tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def GetInstalledPackageUseFlags(pkg_str, board=None):
"""Gets the list of USE flags for installed packages matching |pkg_str|.
Args:
pkg_str: The package name with optional category, version, and slot.
board: The board to inspect.
Returns:
A dictionary with the key being a package CP and the value being the list
of USE flags for that package.
"""
cmd = ['qlist']
if board:
cmd = ['qlist-%s' % board]
cmd += ['-CqU', pkg_str]
result = cros_build_lib.RunCommand(
cmd, enter_chroot=True, capture_output=True, error_code_ok=True)
use_flags = {}
if result.returncode == 0:
for line in result.output.splitlines():
tokens = line.split()
use_flags[tokens[0]] = tokens[1:]
return use_flags | 0b203ebe078d56053c4e2c3b23db91492399de55 | 4,780 |
from flask import jsonify
# `cursor()` below is a module-level helper that creates the shark cursor.
def make_cursor():
"""
Creates a cursor for iterating through results
GetParams:
account: an account
user: a user
handle: a shark client handle
Returns:
        a json object containing the cursor handle
"""
data, statusCode = cursor()
return jsonify(data), statusCode | 225cf3bdcb001f90041cb94dc5fd89c935daaf24 | 4,781 |
import datetime as dt
import agate
from typing import Any
def run_result_factory(data: list[tuple[Any, Any]]):
"""
We need to handle dt.datetime and agate.table.Table.
The rest of the types should already be JSON-serializable.
"""
d = {}
for key, val in data:
if isinstance(val, dt.datetime):
val = val.isoformat()
elif isinstance(val, agate.table.Table):
# agate Tables have a few print methods but they offer plain
# text representations of the table which are not very JSON
# friendly. There is a to_json method, but I don't think
# sending the whole table in an XCOM is a good idea either.
val = {
k: v.__class__.__name__
for k, v in zip(val._column_names, val._column_types)
}
d[key] = val
return d | 25462e0eaf87d4fcdd1f48161dfa5be4643485f4 | 4,782 |
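# A small usage sketch: datetimes are serialised to ISO strings, everything else
# passes through unchanged (the agate branch is not exercised here).
print(run_result_factory([("finished_at", dt.datetime(2024, 1, 1, 12, 0)),
                          ("rows_affected", 3)]))
# {'finished_at': '2024-01-01T12:00:00', 'rows_affected': 3}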
import numpy as np
def compute_steepness(zeroth_moment, peak_wavenumber):
"""Compute characteristic steepness from given peak wave number."""
return np.sqrt(2 * zeroth_moment) * peak_wavenumber | e1cb0beb19ff73e7d2b6a6879d4a388d04644953 | 4,783 |
def secondary_side_radius(mass_ratio, surface_potential):
"""
Side radius of secondary component
:param mass_ratio: float;
:param surface_potential: float;
:return: float; side radius
"""
return calculate_side_radius(1.0, mass_ratio, 1.0, surface_potential, 'secondary') | 3353d5b9cb76f9127ed1066a20a3328fea9b8a46 | 4,784 |
def pts_from_rect_inside(r):
""" returns start_pt, end_pt where end_pt is _inside_ the rectangle """
return (r[0], r[1]), ((r[0] + r[2] - 1), (r[1] + r[3] - 1)) | 51f5ea39763e9f16a2bb3a56eebef4dfe06c5746 | 4,785 |
import numpy as np
def minimum_distance(object_1, object_2):
""" Takes two lists as input
A list of numpy arrays of coordinates that make up object 1 and object 2
Measures the distances between each of the coordinates
Returns the minimum distance between the two objects, as calculated using a vector norm
Stops the calculation and returns 0 if two coordinates overlap
"""
# package import
    # main algorithm
    minimum_distance = float("inf")
for coord_1 in object_1:
for coord_2 in object_2:
distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)
if distance_btwn_coords == 0:
minimum_distance = distance_btwn_coords
return float(minimum_distance)
elif distance_btwn_coords < minimum_distance:
minimum_distance = distance_btwn_coords
return float(minimum_distance) | e61fbb1ab83c5147f69351022f59ebab3295cb5a | 4,786 |
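# A quick check on two small 2-D point sets: the closest pair is (1, 0) and
# (3, 0), so the minimum distance is 2.0.
object_1 = [np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([1.0, 1.0])]
object_2 = [np.array([3.0, 0.0]), np.array([4.0, 1.0])]
print(minimum_distance(object_1, object_2))   # 2.0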
import time as timelib
import pandas as pd
def retrieve_pkl_file(filename, verbose=False):
"""
Retrieve and return contents of pkl file
"""
    if verbose:
        start_time = timelib.time()
        print("\n * Retrieving %s file ..."%filename)
    data = pd.read_pickle(filename)
    if verbose:
        print("\n %s retrieved in %.1f seconds."%(filename, timelib.time() - start_time))
    return data
import os
import zipfile
import fnmatch
import shutil
def extractIpsFile(containerFile, newSimName):
"""
Given a container file, get the ips file in it and write it to current
directory so that it can be used
"""
oldIpsFile=os.path.splitext(containerFile)[0]+os.extsep+"ips"
zf=zipfile.ZipFile(containerFile,"r")
foundFile=""
# Assume that container file contains 1 ips file.
oldIpsFile=fnmatch.filter(zf.namelist(),"*.ips")[0]
ifile=zf.read(oldIpsFile)
ipsFile=newSimName+".ips"
if os.path.exists(ipsFile):
        print("Moving " + ipsFile + " to Save" + ipsFile)
shutil.copy(ipsFile, "Save"+ipsFile)
    # ZipFile.read() returns bytes, so write the new ips file in binary mode.
    ff = open(ipsFile, "wb")
    ff.write(ifile)
    ff.close()
return ipsFile | a8135c7d3a10825e539819dfdb62d5f677680e44 | 4,788 |
import torch
def nplr(measure, N, rank=1, dtype=torch.float):
""" Return w, p, q, V, B such that
(w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
i.e. A = V[w - p q^*]V^*, B = V B
"""
    assert dtype == torch.float or dtype == torch.cfloat
if measure == 'random':
dtype = torch.cfloat if dtype == torch.float else torch.cdouble
# w = torch.randn(N//2, dtype=dtype)
w = -torch.exp(torch.randn(N//2)) + 1j*torch.randn(N//2)
P = torch.randn(rank, N//2, dtype=dtype)
B = torch.randn(N//2, dtype=dtype)
V = torch.eye(N, dtype=dtype)[..., :N//2] # Only used in testing
return w, P, B, V
    # `transition`, `rank_correction` and `contract` are helpers from the surrounding module.
    A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype) # (N, N)
B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
P = rank_correction(measure, N, rank=rank, dtype=dtype)
AP = A + torch.sum(P.unsqueeze(-2)*P.unsqueeze(-1), dim=-3)
w, V = torch.linalg.eig(AP) # (..., N) (..., N, N)
# V w V^{-1} = A
# Only keep one of the conjugate pairs
w = w[..., 0::2].contiguous()
V = V[..., 0::2].contiguous()
V_inv = V.conj().transpose(-1, -2)
B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
P = contract('ij, ...j -> ...i', V_inv, P.to(V)) # V^* P
return w, P, B, V | 0451fa5ed1eeb60bef386991b2d953c190282e0e | 4,789 |
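# A minimal usage sketch with the 'random' measure, which avoids the module's
# transition/rank_correction helpers and therefore runs with torch alone.
w, P, B, V = nplr('random', N=8, rank=1)
print(w.shape, P.shape, B.shape, V.shape)   # (4,) (1, 4) (4,) (8, 4)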
import logging
import pandas as pd
logger = logging.getLogger(__name__)
def read_data(oldest_year: int = 2020, newest_year: int = 2022):
"""Read in csv files of yearly covid data from the nytimes and concatenate into a single pandas DataFrame.
Args:
oldest_year: first year of data to use
newest_year: most recent year of data to use
"""
df_dicts = {} # dictionary to hold the data for each year before concatenation
for year in range(oldest_year, newest_year + 1):
df_dicts[f"df_{year}"] = pd.read_csv(
f"https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-counties-{year}.csv",
index_col="date",
)
logger.info("data read in successfully")
return pd.concat(df_dicts.values()) | 7b8e55ae41890eef3e4f0ac5a9502b8b19f1ad20 | 4,790 |
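# A hedged usage sketch: this downloads one CSV per year from the nytimes
# GitHub repository, so it needs network access and can take a while.
covid = read_data(oldest_year=2020, newest_year=2021)
print(covid.index.min(), covid.index.max(), len(covid))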
from ipaddress import ip_address, IPv4Address
def ip_is_v4(ip: str) -> bool:
    """
    Determines whether an IP address is IPv4 or not
    :param str ip: An IP address as a string, e.g. 192.168.1.1
    :raises ValueError: When the given IP address ``ip`` is invalid
    :return bool: True if IPv4, False if not (e.g. IPv6)
    """
return type(ip_address(ip)) == IPv4Address | d0fa8351921e34ee44c1b6c9fecf14c0efe83397 | 4,791 |
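# Quick checks of both address families.
print(ip_is_v4("192.168.1.1"))   # True
print(ip_is_v4("2001:db8::1"))   # False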
def kdump(self_update=False, snapshot=None):
"""Regenerate kdump initrd
A new initrd for kdump is created in a snapshot.
self_update
Check for newer transactional-update versions.
snapshot
Use the given snapshot or, if no number is given, the current
default snapshot as a base for the next snapshot. Use
"continue" to indicate the last snapshot done.
CLI Example:
.. code-block:: bash
salt microos transactional_update kdump snapshot="continue"
"""
cmd = ["transactional-update"]
cmd.extend(_global_params(self_update=self_update, snapshot=snapshot))
cmd.append("kdump")
return _cmd(cmd) | fd49bf6bfb4af52625b4e479eca60594edb59d9e | 4,792 |
import logging
import datetime
def register_keywords_user(email, keywords, price):
"""Register users then keywords and creates/updates doc
Keyword arguments:
email - email for user
keywords - string of keywords
price -- (optional) max price can be set to None
"""
logging.info('[INFO] Registering user email \'{}\' '.format(email))
# create user doc if doesn't exist
db = utils.get_db_handle('users')
doc = db.find_one({ 'email': email })
# metadata
keywords_id = keywords.replace(" ", "_")
date = str(datetime.datetime.now()).split('.')[0]
num_keywords = 0
list_keywords = []
if doc == None:
doc = db.insert_one({
'email': email,
'dateCreated': date,
'numKeywords': num_keywords,
'keywords': []
})
logging.info('[INFO] Creating new user doc {} with _id: {}'.format(email, doc.inserted_id))
else:
num_keywords = doc['numKeywords']
list_keywords = doc['keywords']
logging.info('[INFO] Found user doc \'{}\' with {} keywords'.format(email, num_keywords))
# insert keywords info along in user doc
max_keywords = 5
if not utils.check_key_exists(list_keywords, keywords_id):
if num_keywords < max_keywords:
update = utils.update_users_doc(db, email, keywords_id, price, date)
if update:
logging.info('[INFO] Successfully created or updated doc for \'{}\''.format(email))
else:
logging.info('[INFO] Error creating or updating doc for \'{}\''.format(email))
return False, 'ERROR_CREATE_DOC'
else:
logging.info('[INFO] Unable to create doc for \'{}\''.format(email))
logging.info('[INFO] Number of keywords exceed maximum of {}'.format(max_keywords))
return False, 'MAX_KEYWORDS_LIMIT'
else:
logging.info('[INFO] Unable to create doc for \'{}\''.format(email))
        logging.info('[INFO] Duplicate key {} for user {}'.format(keywords_id, email))
return False, 'ERROR_DUPE_KEY'
logging.info('[INFO] Registering keywords \'{}\' for email \'{}\' with price \'{}\''.format(keywords, email, price))
# create keywords doc if doesn't exist
db = utils.get_db_handle('keywords')
doc = db.find_one({ 'keyword': keywords_id })
# keywords metadata
date = str(datetime.datetime.now()).split('.')[0]
if doc == None:
doc = db.insert_one({
'keyword': keywords_id,
'subreddit': 'frugalmalefashion',
'dateCreated': date,
'users': []
})
logging.info('[INFO] Creating new keywords doc {} with _id: {}'.format(keywords_id, doc.inserted_id))
else:
logging.info('[INFO] Found keywords doc \'{}\''.format(keywords_id))
# insert user info along in keyword doc
update = utils.update_keywords_doc(db, keywords_id, email, price, date)
if update:
logging.info('[INFO] Successfully created or updated doc for \'{}\''.format(keywords_id))
else:
logging.error('[ERROR] Error creating or updating doc for \'{}\''.format(keywords_id))
return False, 'ERROR_CREATE_DOC'
return True, None | 09c0d3ff12fbd99d6e6a6c23906a74b525f91649 | 4,793 |
import numpy as np
import matplotlib.pyplot as plt
def plot_distribution(df, inv, ax=None, distribution=None, tau_plot=None, plot_bounds=True, plot_ci=True,
label='', ci_label='', unit_scale='auto', freq_axis=True, area=None, normalize=False,
predict_kw={}, **kw):
"""
Plot the specified distribution as a function of tau.
Parameters
----------
df : pandas DataFrame
DataFrame containing experimental EIS data. Used only for scaling and frequency bounds
If None is passed, scaling will not be performed and frequency bounds will not be drawn
inv : Inverter instance
Fitted Inverter instance
ax : matplotlib axis
Axis on which to plot
distribution : str, optional (default: None)
Name of distribution to plot. If None, first distribution in inv.distributions will be used
tau_plot : array, optonal (default: None)
Time constant grid over which to evaluate the distribution.
If None, a grid extending one decade beyond the basis time constants in each direction will be used.
plot_bounds : bool, optional (default: True)
If True, indicate frequency bounds of experimental data with vertical lines.
Requires that DataFrame of experimental data be passed for df argument
plot_ci : bool, optional (default: True)
If True, plot the 95% credible interval of the distribution (if available).
label : str, optional (default: '')
Label for matplotlib
unit_scale : str, optional (default: 'auto')
Scaling unit prefix. If 'auto', determine from data.
Options are 'mu', 'm', '', 'k', 'M', 'G'
freq_axis : bool, optional (default: True)
If True, add a secondary x-axis to display frequency
area : float, optional (default: None)
Active area. If provided, plot the area-normalized distribution
normalize : bool, optional (default: False)
If True, normalize the distribution such that the polarization resistance is 1
predict_kw : dict, optional (default: {})
Keyword args to pass to Inverter predict_distribution() method
kw : keyword args, optional
Keyword args to pass to maplotlib.pyplot.plot
Returns
-------
ax : matplotlib axis
Axis on which distribution is plotted
"""
if ax is None:
fig, ax = plt.subplots(figsize=(3.5, 2.75))
# If no distribution specified, use first distribution
if distribution is None:
distribution = list(inv.distributions.keys())[0]
# If tau_plot not given, go one decade beyond basis tau in each direction
if tau_plot is None:
basis_tau = inv.distributions[distribution]['tau']
tmin = np.log10(np.min(basis_tau)) - 1
tmax = np.log10(np.max(basis_tau)) + 1
num_decades = tmax - tmin
tau_plot = np.logspace(tmin, tmax, int(20 * num_decades + 1))
F_pred = inv.predict_distribution(distribution, tau_plot, **predict_kw)
if normalize and area is not None:
raise ValueError('If normalize=True, area cannot be specified.')
if area is not None:
if df is not None:
for col in ['Zmod', 'Zreal', 'Zimag']:
df[col] *= area
F_pred *= area
if normalize:
Rp_kw = predict_kw.copy()
# if time given, calculate Rp at given time
if 'time' in predict_kw.keys():
Rp_kw['times'] = [predict_kw['time'], predict_kw['time']]
del Rp_kw['time']
Rp = inv.predict_Rp(**Rp_kw)
F_pred /= Rp
if unit_scale == 'auto':
if normalize:
unit_scale = ''
elif df is not None:
unit_scale = get_unit_scale(df, area)
else:
unit_map = {-2: '$\mu$', -1: 'm', 0: '', 1: 'k', 2: 'M', 3: 'G'}
F_max = np.max(F_pred)
F_ord = np.floor(np.log10(F_max) / 3)
unit_scale = unit_map.get(F_ord, '')
scale_factor = get_factor_from_unit(unit_scale)
ax.plot(tau_plot, F_pred / scale_factor, label=label, **kw)
if plot_ci:
if inv.fit_type.find('bayes') >= 0:
F_lo = inv.predict_distribution(distribution, tau_plot, percentile=2.5, **predict_kw)
F_hi = inv.predict_distribution(distribution, tau_plot, percentile=97.5, **predict_kw)
if area is not None:
F_lo *= area
F_hi *= area
if normalize:
F_lo /= Rp
F_hi /= Rp
ax.fill_between(tau_plot, F_lo / scale_factor, F_hi / scale_factor, color='k', alpha=0.2, label=ci_label)
ax.set_xscale('log')
ax.set_xlabel(r'$\tau$ / s')
if plot_bounds:
if df is not None:
ax.axvline(1 / (2 * np.pi * df['Freq'].max()), c='k', ls=':', alpha=0.6, zorder=-10)
ax.axvline(1 / (2 * np.pi * df['Freq'].min()), c='k', ls=':', alpha=0.6, zorder=-10)
if area is not None:
ax.set_ylabel(fr'$\gamma \, (\ln{{\tau}})$ / {unit_scale}$\Omega\cdot\mathrm{{cm}}^2$')
elif normalize:
ax.set_ylabel(fr'$\gamma \, (\ln{{\tau}}) / R_p$')
else:
ax.set_ylabel(fr'$\gamma \, (\ln{{\tau}})$ / {unit_scale}$\Omega$')
# add freq axis to DRT plot
if freq_axis:
# check for existing twin axis
all_axes = ax.figure.axes
ax2 = None
        for other_ax in all_axes:
            if other_ax.bbox.bounds == ax.bbox.bounds and other_ax is not ax:
                ax2 = other_ax
                break
if ax2 is None:
ax2 = ax.twiny()
ax2.set_xscale('log')
ax2.set_xlim(ax.get_xlim())
f_powers = np.arange(7, -4.1, -2)
f_ticks = 10 ** f_powers
ax2.set_xticks(1 / (2 * np.pi * f_ticks))
ax2.set_xticklabels(['$10^{{{}}}$'.format(int(p)) for p in f_powers])
ax2.set_xlabel('$f$ / Hz')
# Indicate zero if necessary
if np.min(F_pred) >= 0:
ax.set_ylim(0, ax.get_ylim()[1])
else:
ax.axhline(0, c='k', lw=0.5)
return ax | f5f6eb29597abb34b4e0c634112370824cedf907 | 4,794 |
def profitsharing_order(self, transaction_id, out_order_no, receivers, unfreeze_unsplit,
appid=None, sub_appid=None, sub_mchid=None):
"""请求分账
:param transaction_id: 微信支付订单号,示例值:'4208450740201411110007820472'
:param out_order_no: 商户分账单号,只能是数字、大小写字母_-|*@,示例值:'P20150806125346'
:param receivers: 分账接收方列表,最多可有50个分账接收方,示例值:[{'type':'MERCHANT_ID', 'account':'86693852', 'amount':888, 'description':'分给商户A'}]
:param unfreeze_unsplit: 是否解冻剩余未分资金,示例值:True, False
:param appid: 应用ID,可不填,默认传入初始化时的appid,示例值:'wx1234567890abcdef'
:param sub_appid: (服务商模式)子商户应用ID,示例值:'wxd678efh567hg6999'
:param sub_mchid: (服务商模式)子商户的商户号,由微信支付生成并下发。示例值:'1900000109'
"""
params = {}
if transaction_id:
params.update({'transaction_id': transaction_id})
else:
raise Exception('transaction_id is not assigned')
if out_order_no:
params.update({'out_order_no': out_order_no})
else:
raise Exception('out_order_no is not assigned')
if isinstance(unfreeze_unsplit, bool):
params.update({'unfreeze_unsplit': unfreeze_unsplit})
else:
raise Exception('unfreeze_unsplit is not assigned')
if isinstance(receivers, list):
params.update({'receivers': receivers})
else:
raise Exception('receivers is not assigned')
for receiver in params.get('receivers'):
if receiver.get('name'):
receiver['name'] = self._core.encrypt(receiver.get('name'))
params.update({'appid': appid or self._appid})
if self._partner_mode:
if sub_appid:
params.update({'sub_appid': sub_appid})
if sub_mchid:
params.update({'sub_mchid': sub_mchid})
else:
raise Exception('sub_mchid is not assigned.')
path = '/v3/profitsharing/orders'
return self._core.request(path, method=RequestType.POST, data=params) | 8885a953de7e74a562fc57ac242fafbf79ada7a8 | 4,795 |
# `Tensor`, `xnmt` and `dy` (DyNet) are provided by the surrounding xnmt module.
def merge_time_batch_dims(x: Tensor) -> Tensor:
"""
Pack the time dimension into the batch dimension.
Args:
x: input tensor
Returns:
output tensor
"""
if xnmt.backend_dynet:
((hidden_dim, seq_len), batch_size_) = x.dim()
return dy.reshape(x, (hidden_dim,), batch_size=batch_size_ * seq_len)
else:
batch_size_, seq_len, hidden_dim = x.size()
return x.view((batch_size_ * seq_len, hidden_dim)) | 73b09ca714870f18523c07b82e544b208fcde680 | 4,796 |
import algopy
def get_log_likelihood(P, v, subs_counts):
"""
The stationary distribution of P is empirically derived.
It is proportional to the codon counts by construction.
@param P: a transition matrix using codon counts and free parameters
@param v: stationary distribution proportional to observed codon counts
@param subs_counts: observed substitution counts
"""
A = subs_counts
B = algopy.log(P.T * v)
    # `slow_part` is a helper from the surrounding module.
    log_likelihoods = slow_part(A, B)
return algopy.sum(log_likelihoods) | b7ed78e1e111a74f08b36f5ac41618318539d1c7 | 4,797 |
def union(l1, l2):
""" return the union of two lists """
return list(set(l1) | set(l2)) | 573e3b0e475b7b33209c4a477ce9cab53ec849d4 | 4,798 |
def actual_kwargs():
"""
Decorator that provides the wrapped function with an attribute 'actual_kwargs' containing just those keyword
arguments actually passed in to the function.
Based on code from http://stackoverflow.com/a/1409284/127480
"""
def decorator(function):
def inner(*args, **kwargs):
inner.actual_kwargs = kwargs
inner.actual_kwargs_except = \
lambda keys: {key: value for key, value in kwargs.iteritems() if key not in keys}
return function(*args, **kwargs)
return inner
return decorator | 37477edecb9442f759f4a234ea9037f7568f9770 | 4,799 |
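# A short usage sketch: after a call, the wrapper exposes exactly the keyword
# arguments that were explicitly passed in.
@actual_kwargs()
def configure(host="localhost", port=8080, debug=False):
    return configure.actual_kwargs

print(configure(port=9000))   # {'port': 9000}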