content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import numpy as np
def _svd_classification(dataset='mnist_small'):
"""
SVD-based least squares on a classification dataset
Inputs:
dataset: (str) name of dataset
Outputs:
accuracy on predicted values
"""
if dataset=='rosenbrock':
x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset('rosenbrock', n_train=5000, d=2)
else:
x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset(dataset)
x_total = np.vstack([x_train, x_valid])
y_total = np.vstack([y_train, y_valid])
X = np.ones((len(x_total), len(x_total[0]) + 1))
X[:, 1:] = x_total
U, S, Vh = np.linalg.svd(X)
# Invert Sigma
sig = np.diag(S)
filler = np.zeros([len(x_total)-len(S), len(S)])
sig_inv = np.linalg.pinv(np.vstack([sig, filler]))
# Compute weights
w = Vh.T @ (sig_inv @ (U.T @ y_total))
# Make test predictions
X_test = np.ones((len(x_test), len(x_test[0]) + 1))
X_test[:, 1:] = x_test
predictions = np.argmax(X_test @ w, axis=1)
y_test = np.argmax(1 * y_test, axis=1)
return (predictions == y_test).sum() / len(y_test)
|
55eae147130a4552ba33f466f6127ab5df1323b9
| 30,420 |
def set_answer(set_number):
"""
get result answer
>>> set_answer(600851475143)
6857
>>> set_answer(3000)
5
"""
while True:
prime_fac = prime_factorization(set_number)
if prime_fac < set_number:
set_number //= prime_fac
else:
return set_number
|
b601a764123b737993c1b822ff466f47ca5caea6
| 30,421 |
import torch
from torch.autograd import Variable
def model_infer(model, test_images, test_affinities, test_beliefs, args):
"""
Parameters:
model: object with the trained model
test_images: batch of images (float32), size: (test_batch_size,3,x,y)
test_affinities: batch of affinity maps (float32), size: (test_batch_size,16,x/8,y/8)
test_beliefs: batch of belief maps (float32), size: (test_batch_size,9,x/8,y/8)
Returns:
belief: output belief maps, size: (test_batch_size,9,x/8,y/8)
affinity: output affinity maps, size: (test_batch_size,16,x/8,y/8)
loss: scalar
"""
if torch.cuda.is_available():
test_images_v = Variable(test_images.cuda(device=args.gpu_device))
test_beliefs_v = Variable(test_beliefs.cuda(device=args.gpu_device))
test_affinities_v = Variable(test_affinities.cuda(device=args.gpu_device))
else:
test_images_v = Variable(test_images)
test_beliefs_v = Variable(test_beliefs)
test_affinities_v = Variable(test_affinities)
# This shall be adjusted according to the specific model
with torch.no_grad():
output_belief, output_affinity = model.forward(test_images_v)
J = compute_loss(output_belief, output_affinity, test_beliefs_v, test_affinities_v)
belief = output_belief[5].data.cpu().numpy()
affinity = output_affinity[5].data.cpu().numpy()
loss = J.data.cpu().numpy()
return belief, affinity, loss
|
32534691056c96e3c96adaebfc112da7525ef6dd
| 30,422 |
import re
def clean_value(value, suffix):
"""
Strip out copy suffix from a string value.
:param value: Current value e.g "Test Copy" or "test-copy" for slug fields.
:type value: `str`
:param suffix: The suffix value to be replaced with an empty string.
:type suffix: `str`
:return: Stripped string without the suffix.
"""
# type: (str, str) -> str
return re.sub(r"([\s-]?){}[\s-][\d]$".format(suffix), "", value, flags=re.I)
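# Illustrative usage, not part of the original snippet: with the regex above, the
# suffix plus a trailing counter digit is stripped, case-insensitively.
print(clean_value("Test Copy 2", "Copy"))   # -> "Test"
print(clean_value("test-copy-3", "copy"))   # -> "test"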
|
d2ec3b3affbf71411039f234c05935132205ae16
| 30,423 |
def list_devices_to_string(list_item):
"""Convert cfg devices into comma split format.
Args:
list_item (list): list of devices, e.g. [], [1], ["1"], [1,2], ...
Returns:
devices (string): comma split devices
"""
return ",".join(str(i) for i in list_item)
|
717f40d3fd0c24b93d5859491d3f9f16a2b0a069
| 30,424 |
def trace_module(no_print=True):
""" Trace plot series module exceptions """
mname = 'series'
fname = 'plot'
module_prefix = 'putil.plot.{0}.Series.'.format(mname)
callable_names = (
'__init__',
'data_source',
'label',
'color',
'marker',
'interp',
'line_style',
'secondary_axis'
)
return docs.support.trace_support.run_trace(
mname, fname, module_prefix, callable_names, no_print, ['putil.eng']
)
|
b46e69836257de525e55346872f01cb338e036c9
| 30,425 |
def get_ids(values):
"""Transform numeric identifiers, corpora shortcodes (slugs),
and two-letter ISO language codes, into their corresponding numeric
identifier as per the order in CORPORA_SOURCES.
:return: List of indices in CORPORA_SOURCES
:rtype: list
"""
if "all" in values:
ids = list(range(len(CORPORA_SOURCES)))
else:
ids = []
for index, corpus_info in enumerate(CORPORA_SOURCES):
corpus_id = index + 1
props = corpus_info["properties"]
if (str(corpus_id) in values
or props["slug"] in values
or props["language"] in values):
ids.append(index)
return ids
|
482c3940a0a8492820d94d5a9af41c5891c82406
| 30,426 |
import click
def quiet_option(func):
"""Add a quiet option."""
def _callback(ctx, unused_param, value):
_set_verbosity(ctx, -value)
return value
return click.option('-q', '--quiet', count=True,
expose_value=False, help='Decreases verbosity.',
callback=_callback)(func)
|
b13dd670cd06136fbccfccff23ed27233efcb7bd
| 30,427 |
def _compute_gaussian_fwhm(spectrum, regions=None):
"""
This is a helper function for the above `gaussian_fwhm()` method.
"""
fwhm = _compute_gaussian_sigma_width(spectrum, regions) * gaussian_sigma_to_fwhm
return fwhm
|
b5e83752141830911a3d1e26cce10c82b1414740
| 30,428 |
from typing import TypeVar
from typing import Optional
from typing import Callable
from typing import Any
from typing import List
T = TypeVar("T")
async def sorted(
iterable: AnyIterable[T],
*,
key: Optional[Callable[[T], Any]] = None,
reverse: bool = False,
) -> List[T]:
"""
Sort items from an (async) iterable into a new list
The optional ``key`` argument specifies a one-argument (async) callable, which
provides a substitute for determining the sort order of each item.
The special value and default :py:data:`None` represents the identity function,
i.e. compares items directly.
The default sort order is ascending, that is items with ``a < b``
imply ``result.index(a) < result.index(b)``. Use ``reverse=True``
for descending sort order.
.. note::
The actual sorting is synchronous,
so a very large ``iterable`` or very slow comparison
may block the event loop notably.
It is guaranteed to be worst-case O(n log n) runtime.
"""
if key is None:
try:
return _sync_builtins.sorted(iterable, reverse=reverse) # type: ignore
except TypeError:
pass
key = _awaitify(key) if key is not None else _identity
keyed_items = [(await key(item), item) async for item in aiter(iterable)]
keyed_items.sort(key=lambda ki: ki[0], reverse=reverse)
return [item for key, item in keyed_items]
|
628336093c282f340f3ad2f750227e1286ceefef
| 30,429 |
def config_split(config):
"""
Splits a config dict into smaller chunks.
This helps to avoid sending big config files.
"""
split = []
if "actuator" in config:
for name in config["actuator"]:
split.append({"actuator": {name: config["actuator"][name]}})
del(config["actuator"])
split.append(config)
return split
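# Illustrative usage, not part of the original snippet: each actuator becomes its
# own chunk and the remaining config is appended as the final chunk.
cfg = {"actuator": {"a1": {"pin": 4}, "a2": {"pin": 5}}, "sensor": {"s1": {}}}
print(config_split(cfg))
# [{'actuator': {'a1': {'pin': 4}}}, {'actuator': {'a2': {'pin': 5}}}, {'sensor': {'s1': {}}}]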
|
2006534ece382c55f1ba3914300f5b6960323e53
| 30,430 |
def transl(x, y=None, z=None):
"""
Create or decompose translational homogeneous transformations.
Create a homogeneous transformation
===================================
- T = transl(v)
- T = transl(vx, vy, vz)
The transformation is created with a unit rotation submatrix.
The translational elements are set from elements of v which is
a list, array or matrix, or from separate passed elements.
Decompose a homogeneous transformation
======================================
- v = transl(T)
Return the translation vector
"""
if y is None and z is None:
x=mat(x)
try:
if ishomog(x):
return x[0:3,3].reshape(3,1)
else:
return concatenate((concatenate((eye(3),x.reshape(3,1)),1),mat([0,0,0,1])))
except AttributeError:
n=len(x)
r = [[],[],[]]
for i in range(n):
r = concatenate((r,x[i][0:3,3]),1)
return r
elif y is not None and z is not None:
return concatenate((concatenate((eye(3),mat([x,y,z]).T),1),mat([0,0,0,1])))
|
d3b47af2ea8f130559f19dea269fbd1a50a8559c
| 30,431 |
def find_next_square2(sq: int) -> int:
"""
This version is just more compact.
"""
sqrt_of_sq = sq ** (1/2)
return -1 if sqrt_of_sq % 1 != 0 else int((sqrt_of_sq + 1) ** 2)
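# Illustrative usage, not part of the original snippet: 121 = 11**2, so the next
# square is 144; 114 is not a perfect square, so the function returns -1.
assert find_next_square2(121) == 144
assert find_next_square2(114) == -1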
|
62246b78cc065b629961a7283671e776481a8659
| 30,432 |
import colorsys
def hex_2_hsv(hex_col):
"""
convert hex code to colorsys style hsv
>>> hex_2_hsv('#f77f00')
(0.08569500674763834, 1.0, 0.9686274509803922)
"""
hex_col = hex_col.lstrip('#')
r, g, b = tuple(int(hex_col[i:i+2], 16) for i in (0, 2 ,4))
return colorsys.rgb_to_hsv(r/255.0, g/255.0, b/255.0)
|
a80e9c5470dfc64c61d12bb4b823411c4a781bef
| 30,433 |
from pathlib import Path
def _drivers_dir() -> str:
"""
ドライバ格納ディレクトリのパスを返します
:return: ドライバ格納ディレクトリのパス
"""
return str(Path(__file__).absolute().parent.parent.joinpath('drivers'))
|
45b173099f6df24398791ec33332072a7651fa4f
| 30,434 |
import types
def create_list_response_value(
*,
authorization: types.TAuthorization,
uri: types.TUri,
auth_info: types.CredentialsAuthInfo,
) -> types.TResponseValue:
"""
Calculate the response for a list type response.
Raises NotFoundError when the uri is not linked to a known spec.
Args:
authorization: The authorization credentials used to build the install links.
uri: The requested uri.
auth_info: Information about the user.
Returns:
The html to return to the user for the request.
"""
assert uri.startswith("/")
assert uri.endswith("/")
spec_id = uri[1:-1]
try:
version_infos = package_database.get().list_spec_versions(
sub=auth_info.sub, name=spec_id
)
except package_database.exceptions.NotFoundError as exc:
raise exceptions.NotFoundError(
f"could not find package with {spec_id=}"
) from exc
host = "index.package.openalchemy.io"
def package_name(version: str) -> str:
"""Calculate the name of the package."""
return f"{spec_id.replace('-', '_')}-{version}.tar.gz"
install_links = list(
map(
lambda version_info: (
f'<a href="https://'
f"{authorization.public_key}:{authorization.secret_key}"
f"@{host}/{spec_id}/"
f'{package_name(version_info["version"])}">'
f'{package_name(version_info["version"])}</a><br>'
),
version_infos,
)
)
joined_install_links = "\n".join(install_links)
return f"""
<body>
{joined_install_links}
</body>
"""
|
492d5a93b7db393dafde48c66d323dda64c7e32c
| 30,435 |
def get_variables(expr):
"""
Get variables of an expression
"""
if isinstance(expr, NegBoolView):
# this is just a view, return the actual variable
return [expr._bv]
if isinstance(expr, _NumVarImpl):
# a real var, do our thing
return [expr]
vars_ = []
# if list or Expr: recurse
if is_any_list(expr):
for subexpr in expr:
vars_ += get_variables(subexpr)
elif isinstance(expr, Expression):
for subexpr in expr.args:
vars_ += get_variables(subexpr)
# else: every non-list, non-expression
return vars_
|
6326ae90f0fa6daa04e0dedc95cf52f4081fa359
| 30,436 |
def tousLesIndices(stat):
"""
Returns the indices of all the elements of the graph
"""
return stat.node2com.keys()
#s=stat.node2com.values()
global globAuthorIndex
global globTfIdfTab
#pprint(globAuthorIndex)
#pprint(stat.node2com.values())
#glob node->index
return [globAuthorIndex[x] for x in stat.node2com]
#return stat.node2com.values()
#def varianceGroupe():
#def distanceListePointsCentre(indexsCommunaute, centre):
|
fa847ee3913d521778ee3462c8e946f0ff001c76
| 30,437 |
def edit_tx_sheet(request, sheet_id):
"""Allows the user to edit treatment sheet fields and updates the date of the sheet"""
tx_sheet = get_object_or_404(TxSheet, id=sheet_id)
form = TxSheetForm(instance=tx_sheet)
if request.user == tx_sheet.owner:
if request.method == 'POST':
form = TxSheetForm(data=request.POST)
if form.is_valid():
defaults = {'owner': request.user,
'name': request.POST['name'],
'comment': request.POST['comment'],
'date': date.today()}
tx_sheet = form.update(sheet_id=sheet_id, defaults=defaults)
return redirect(tx_sheet)
return render(request, 'tx_sheet/tx_sheet_edit.html', {'navbar': 'tx_sheet', 'form': form})
else:
raise PermissionDenied
|
04ef22e069cc329d4eceae5ae567867fb31f787c
| 30,438 |
import warnings
def system(
W, L_x, L_sc_up, L_sc_down, z_x, z_y, a, shape, transverse_soi,
mu_from_bottom_of_spin_orbit_bands, k_x_in_sc, wraparound, infinite,
sc_leads=False, no_phs=False, rough_edge=None,
phs_breaking_potential=False):
"""Create zigzag system
Parameters
----------
W : float
Width of the semiconductor (or contact separation of the junction).
L_x : float
Length of the system (x-dimension).
L_sc_up : float
Minimum width of the top superconductor.
L_sc_down : float
Minimum width of the bottom superconductor.
z_x : float
Period of zigzag.
z_y : float
Amplitude of zigzag.
a : float
Lattice spacing.
shape : string
Can be either 'sawtooth' for zigzag shape, or 'parallel_curve'
for a shape formed by curve parallel to a sine curve.
transverse_soi : bool
Toggle Rashba spin-orbit in the y-direction.
mu_from_bottom_of_spin_orbit_bands : bool
Toggle counting chemical potential from bottom of spin orbit band.
k_x_in_sc : bool
Toggle whether superconductor have hopping in the x-direction.
wraparound : bool
Toggle a wraparound system, such that the translational invariance
is transformed into the momentum parameter k_x.
infinite : bool
Toggle whether the system contains a z_x periodic
translational invariance.
sc_leads : bool, optional
Toggle superconducting leads in y-direction.
no_phs : bool, optional
Remove particle-hole symmetry by removing the electron-hole orbital.
rough_edge : bool, optional
Toggle roughened edges to shape.
phs_breaking_potential : bool, optional
Add particle-hole symmetry breaking potential to allow for a
computationally cheaper way to calculate the Majorana decay length.
Returns
-------
kwant.builder.FiniteSystem or kwant.builder.InfiniteSystem
"""
if wraparound and not infinite:
raise ValueError('If you want to use wraparound, infinite must be True.')
if sc_leads and (not infinite or not wraparound):
raise ValueError('If you want to use sc_leads, infinite and wraparound must be True.')
template_strings = get_template_strings(
transverse_soi, mu_from_bottom_of_spin_orbit_bands,
k_x_in_sc, False, no_phs, phs_breaking_potential)
template = {k: discretize(v, coords=('x', 'y'), grid_spacing=a)
for k, v in template_strings.items()}
shapes = get_shapes(shape, a, z_x, z_y, W, L_x, L_sc_down, L_sc_up, rough_edge)
syst = kwant.Builder(kwant.TranslationalSymmetry([L_x, 0]) if infinite else None)
for y in np.arange(-W - L_sc_down, W + L_sc_up, a):
# We're unsure about the location of the barrier
# so we loop over all possible sites.
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
sites = syst.fill(template['barrier'], shapes['edge'], (0, y))
syst.fill(template['normal'], *shapes['normal'])
if L_sc_up > 0:
syst.fill(template['sc_top'], *shapes['sc_top'])
if L_sc_down > 0:
syst.fill(template['sc_bot'], *shapes['sc_bot'])
if infinite and wraparound:
syst = kwant.wraparound.wraparound(syst)
if sc_leads:
lead_up = kwant.Builder(kwant.TranslationalSymmetry([L_x, 0], [0, a]))
lead_down = kwant.Builder(kwant.TranslationalSymmetry([L_x, 0], [0, -a]))
lead_up = kwant.wraparound.wraparound(lead_up, keep=1)
lead_down = kwant.wraparound.wraparound(lead_down, keep=1)
lead_up.fill(template['sc_top'], lambda s: 0 <= s.pos[0] < L_x, (0, 0))
lead_down.fill(template['sc_bot'], lambda s: 0 <= s.pos[0] < L_x, (0, 0))
syst.attach_lead(lead_up)
syst.attach_lead(lead_down)
return syst.finalized()
|
101f3fffeb333cb9c53c2ef930e17fa0f7e08966
| 30,439 |
def get_state_x1_pure_state_vector() -> np.ndarray:
"""Returns the pure state vector for :math:`|-\\rangle`.
:math:`|-\\rangle := \\frac{1}{\\sqrt{2}} (|0\\rangle - |1\\rangle)`
Returns
-------
np.ndarray
the pure state vector.
"""
vec = (1 / np.sqrt(2)) * np.array([1, -1], dtype=np.complex128)
return vec
|
81d52d34492a0b57206d3cf55ceb9dd939a7cdf8
| 30,441 |
def user_create(user_data):
""" Cria um usuário no banco de dados e retorna o objeto criado """
user_model = get_user_model()
user = user_model.objects.create_user(**user_data)
return user
|
163c742601d25a7b04e572ea6b32de6625b99de5
| 30,442 |
from typing import Counter
from math import log
def shannon_entropy(text: str) -> float:
"""
same definition as in feature processor for feature extraction
calculates shannon entropy of a given string
"""
content_char_counts = Counter([ch for ch in text])
total_string_size = len(text)
entropy: float = 0
for ratio in [char_count / total_string_size for char_count in content_char_counts.values()]:
entropy -= ratio * log(ratio, 2)
return entropy
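# Illustrative usage, not part of the original snippet: a single repeated character
# has zero entropy, while "ab" carries exactly one bit per character.
assert shannon_entropy("aaaa") == 0
assert abs(shannon_entropy("ab") - 1.0) < 1e-9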
|
e3092f8620b809a3d935ad16290d077122f6b1df
| 30,443 |
import numpy as np
def solve(inputmatrix):
"""
This function contains a solution to the data in 4be741c5.json posed by the Abstraction and
Reasoning Corpus (ARC).
The problem presents an n x m grid whose coloured squares repeat along either the rows or the columns.
The solution returns the unique colours in their order of first appearance, read row-wise or column-wise as appropriate.
"""
#Empty result list to return results
result=[]
#convert input to numpy array
y = np.array([np.array(xi) for xi in inputmatrix])
if len(np.unique(y[:1][0]))>1:#if the count of unique colors is more than one
indexes = np.unique(y[:1][0], return_index=True)[1] #Get the indexes of unique colour
row=[y[:1][0][index] for index in sorted(indexes)]#Get the unique colors in unsorted list
result.append(row)#append row to result
else:#if the colours run down a column
indexes = np.unique(y[:, 0], return_index=True)[1]#Get the indexes of the unique colours
column = [y[:, 0][index] for index in sorted(indexes)]#Get the unique colours in order of first appearance
for value in column:
result.append([value])#Append each value to the result
return (result)
|
33f27b9bd57ce00972d8c29e7ae3a0ac71dd2455
| 30,444 |
def compute_rewards(s1, s2):
"""
input: s1 - state before action
s2 - state after action
rewards based on proximity to each goal
"""
r = []
for g in TASKS:
dist1 = np.linalg.norm(s1 - g)
dist2 = np.linalg.norm(s2 - g)
reward = dist1 - dist2
r.append(reward)
return r
|
1a6ee67ce581dc07d735e15fe62d09561cf0453f
| 30,445 |
import ipaddress
def decode(i_dunno):
"""
Decode an I-DUNNO representation into an ipaddress.IPv6Address or an ipaddress.IPv4Address object.
A ValueError is raised if decoding fails due to invalid notation or resulting IP address is invalid.
The output of this function SHOULD NOT be presented to humans, as recommended by RFC8771.
"""
bits = []
for char in i_dunno.decode('utf-8'):
num = ord(char)
for minimum, length in utf8_lengths:
if num < (1 << length) and (minimum == 0 or num >= (1 << minimum)):
bits += int_to_bits(num, length)
break
else:
raise ValueError('invalid I-DUNNO')
addr = bits_to_bytes(bits)
if len(addr) == 16:
cls = ipaddress.IPv6Address
elif len(addr) == 4:
cls = ipaddress.IPv4Address
else:
raise ValueError('invalid I-DUNNO')
try:
return cls(addr)
except ipaddress.AddressValueError:
raise ValueError('invalid IP address')
|
557da5af7b33d988754f67ecd4574be9bdc85784
| 30,446 |
def location_descriptors():
"""Provide possible templated_sequence input."""
return [
{
"id": "NC_000001.11:15455",
"type": "LocationDescriptor",
"location": {
"sequence_id": "ncbi:NC_000001.11",
"interval": {
"start": {
"type": "Number",
"value": 15455
},
"end": {
"type": "Number",
"value": 15456
}
},
"type": "SequenceLocation"
},
"label": "NC_000001.11:15455",
},
{
"id": "NC_000001.11:15566",
"type": "LocationDescriptor",
"location": {
"sequence_id": "ncbi:NC_000001.11",
"interval": {
"start": {
"type": "Number",
"value": 15565
},
"end": {
"type": "Number",
"value": 15566
}
},
"type": "SequenceLocation"
},
"label": "NC_000001.11:15566",
},
{
"id": "chr12:p12.1",
"type": "LocationDescriptor",
"location": {
"species_id": "taxonomy:9606",
"chr": "12",
"interval": {"start": "p12.1", "end": "p12.1"}
},
"label": "chr12:p12.1",
},
{
"id": "chr12:p12.2",
"type": "LocationDescriptor",
"location": {
"species_id": "taxonomy:9606",
"chr": "12",
"interval": {"start": "p12.2", "end": "p12.2"}
},
"label": "chr12:p12.2",
},
{
"id": "NC_000001.11:15455-15566",
"type": "LocationDescriptor",
"location": {
"sequence_id": "ncbi:NC_000001.11",
"interval": {
"start": {
"type": "Number",
"value": 15455
},
"end": {
"type": "Number",
"value": 15566
}
},
"type": "SequenceLocation"
},
"label": "NC_000001.11:15455-15566",
},
{
"id": "chr12:p12.1-p12.2",
"type": "LocationDescriptor",
"location": {
"species_id": "taxonomy:9606",
"chr": "12",
"interval": {"start": "p12.1", "end": "p12.2"}
},
"label": "chr12:p12.1-p12.2",
},
{
"id": "fusor.location_descriptor:NP_001123617.1",
"type": "LocationDescriptor",
"location": {
"sequence_id": "ga4gh:SQ.sv5egNzqN5koJQH6w0M4tIK9tEDEfJl7",
"type": "SequenceLocation",
"interval": {
"start": {
"type": "Number",
"value": 171
},
"end": {
"type": "Number",
"value": 204
}
}
}
},
{
"id": "fusor.location_descriptor:NP_002520.2",
"type": "LocationDescriptor",
"location": {
"sequence_id": "ga4gh:SQ.vJvm06Wl5J7DXHynR9ksW7IK3_3jlFK6",
"type": "SequenceLocation",
"interval": {
"start": {
"type": "Number",
"value": 510
},
"end": {
"type": "Number",
"value": 781
}
}
}
}
]
|
da13824ff6f91caa635700759a29fb1f36aae1be
| 30,448 |
from typing import Optional
import tensorflow as tf
def positional_features_gamma(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
stddev=None,
start_mean=None):
"""Positional features computed using the gamma distributions."""
del bin_size # Unused.
if seq_length is None:
seq_length = tf.reduce_max(tf.abs(positions)) + 1
if stddev is None:
stddev = seq_length / (2 * feature_size)
if start_mean is None:
start_mean = seq_length / feature_size
mean = tf.linspace(start_mean, seq_length, num=feature_size)
mean = _prepend_dims(mean, positions.shape.rank)
concentration = (mean / stddev)**2
rate = mean / stddev**2
probabilities = gamma_pdf(
tf.abs(tf.cast(positions, dtype=tf.float32))[..., tf.newaxis],
concentration, rate)
probabilities += 1e-8 # To ensure numerical stability.
outputs = probabilities / tf.reduce_max(probabilities)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
|
20dc2154a194d99ec1a9931e10fca2b43d360a6e
| 30,449 |
from typing import List
def _get_frame_data(mapAPI: MapAPI, frame: np.ndarray, agents_frame: np.ndarray,
tls_frame: np.ndarray) -> FrameVisualization:
"""Get visualisation objects for the current frame.
:param mapAPI: mapAPI object (used for lanes, crosswalks etc..)
:param frame: the current frame (used for ego)
:param agents_frame: agents in this frame
:param tls_frame: the tls of this frame
:return: A FrameVisualization object. NOTE: trajectory are not included here
"""
ego_xy = frame["ego_translation"][:2]
#################
# plot lanes
lane_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["lanes"]["bounds"], 50)
active_tl_ids = set(filter_tl_faces_by_status(tls_frame, "ACTIVE")["face_id"].tolist())
lanes_vis: List[LaneVisualization] = []
for idx, lane_idx in enumerate(lane_indices):
lane_idx = mapAPI.bounds_info["lanes"]["ids"][lane_idx]
lane_tl_ids = set(mapAPI.get_lane_traffic_control_ids(lane_idx))
lane_colour = "gray"
for tl_id in lane_tl_ids.intersection(active_tl_ids):
lane_colour = COLORS[mapAPI.get_color_for_face(tl_id)]
lane_coords = mapAPI.get_lane_coords(lane_idx)
left_lane = lane_coords["xyz_left"][:, :2]
right_lane = lane_coords["xyz_right"][::-1, :2]
lanes_vis.append(LaneVisualization(xs=np.hstack((left_lane[:, 0], right_lane[:, 0])),
ys=np.hstack((left_lane[:, 1], right_lane[:, 1])),
color=lane_colour))
#################
# plot crosswalks
crosswalk_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["crosswalks"]["bounds"], 50)
crosswalks_vis: List[CWVisualization] = []
for idx in crosswalk_indices:
crosswalk = mapAPI.get_crosswalk_coords(mapAPI.bounds_info["crosswalks"]["ids"][idx])
crosswalks_vis.append(CWVisualization(xs=crosswalk["xyz"][:, 0],
ys=crosswalk["xyz"][:, 1],
color="yellow"))
#################
# plot ego and agents
agents_frame = np.insert(agents_frame, 0, get_ego_as_agent(frame))
box_world_coords = get_box_world_coords(agents_frame)
# ego
ego_vis = EgoVisualization(xs=box_world_coords[0, :, 0], ys=box_world_coords[0, :, 1],
color="red", center_x=agents_frame["centroid"][0, 0],
center_y=agents_frame["centroid"][0, 1])
# agents
agents_frame = agents_frame[1:]
box_world_coords = box_world_coords[1:]
agents_vis: List[AgentVisualization] = []
for agent, box_coord in zip(agents_frame, box_world_coords):
label_index = np.argmax(agent["label_probabilities"])
agent_type = PERCEPTION_LABELS[label_index]
agents_vis.append(AgentVisualization(xs=box_coord[..., 0],
ys=box_coord[..., 1],
color="#1F77B4" if agent_type not in COLORS else COLORS[agent_type],
track_id=agent["track_id"],
agent_type=PERCEPTION_LABELS[label_index],
prob=agent["label_probabilities"][label_index]))
return FrameVisualization(ego=ego_vis, agents=agents_vis, lanes=lanes_vis,
crosswalks=crosswalks_vis, trajectories=[])
|
65f3733e858f595877af83e0574bb53c5568390c
| 30,450 |
def calculate_gc(x):
"""Calculates the GC content of DNA sequence x.
x: a string composed only of A's, T's, G's, and C's."""
x = x.upper()
return float(x.count('G') + x.count('C')) / (x.count('G') + x.count('C') + x.count('A') + x.count('T'))
|
aae64ff550ef26e75518bdad8a12b7cda9e060d2
| 30,451 |
def no_float_zeros(v):
"""
if a float that is equiv to integer - return int instead
"""
if v % 1 == 0:
return int(v)
else:
return v
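# Illustrative usage, not part of the original snippet.
assert no_float_zeros(3.0) == 3 and isinstance(no_float_zeros(3.0), int)
assert no_float_zeros(3.5) == 3.5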
|
a33321408c43d164a8ca2c7f1d1bc6270e5708ec
| 30,453 |
import torch
def quat_mult(q_1, q_2):
"""Multiplication in the space of quaternions."""
a_1, b_1, c_1, d_1 = q_1[:, 0], q_1[:, 1], q_1[:, 2], q_1[:, 3]
a_2, b_2, c_2, d_2 = q_2[:, 0], q_2[:, 1], q_2[:, 2], q_2[:, 3]
q_1_q_2 = torch.stack(
(
a_1 * a_2 - b_1 * b_2 - c_1 * c_2 - d_1 * d_2,
a_1 * b_2 + b_1 * a_2 + c_1 * d_2 - d_1 * c_2,
a_1 * c_2 - b_1 * d_2 + c_1 * a_2 + d_1 * b_2,
a_1 * d_2 + b_1 * c_2 - c_1 * b_2 + d_1 * a_2,
),
dim=1,
)
return q_1_q_2
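# Illustrative usage, not part of the original snippet: multiplying by the identity
# quaternion (1, 0, 0, 0) leaves the other quaternion unchanged.
q_id = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
q = torch.tensor([[0.5, 0.5, 0.5, 0.5]])
assert torch.allclose(quat_mult(q_id, q), q)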
|
dac82e246221f9af552f44ca26089443b8eaadd7
| 30,454 |
def _flip_dict_keys_and_values(d):
"""Switches the keys and values of a dictionary. The input dicitonary is not modified.
Output:
dict
"""
output = {}
for key, value in d.items():
output[value] = key
return output
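# Illustrative usage, not part of the original snippet; note that duplicate values
# in the input collapse to a single key in the output.
assert _flip_dict_keys_and_values({"a": 1, "b": 2}) == {1: "a", 2: "b"}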
|
b861fc3bd194d26ee05b9a56faad3394939064bf
| 30,455 |
from typing import Tuple
from typing import Optional
def set_dative_bonds(
mol: Chem.rdchem.Mol, from_atoms: Tuple[int, int] = (7, 8)
) -> Optional[Chem.rdchem.Mol]:
"""Replaces some single bonds between metals and atoms with atomic numbers in fromAtoms
with dative bonds. The replacement is only done if the atom has "too many" bonds.
Arguments:
mol: molecule with bond to modify
from_atoms: List of atoms (symbol or atomic number) to consider for bond replacement.
By default, only Nitrogen (7) and Oxygen (8) are considered.
Returns:
The modified molecule.
"""
rwmol = Chem.RWMol(mol) # type: ignore
rwmol.UpdatePropertyCache(strict=False)
metals = [at for at in rwmol.GetAtoms() if is_transition_metal(at)]
for metal in metals:
for nbr in metal.GetNeighbors():
if (nbr.GetAtomicNum() in from_atoms or nbr.GetSymbol() in from_atoms) and (
nbr.GetExplicitValence() > PERIODIC_TABLE.GetDefaultValence(nbr.GetAtomicNum())
and rwmol.GetBondBetweenAtoms(nbr.GetIdx(), metal.GetIdx()).GetBondType()
== SINGLE_BOND
):
rwmol.RemoveBond(nbr.GetIdx(), metal.GetIdx())
rwmol.AddBond(nbr.GetIdx(), metal.GetIdx(), DATIVE_BOND)
return rwmol
|
8e67732e7f10ac273e51a0ae1b3f6c3cff27b291
| 30,456 |
from typing import Optional
def _b2s(b: Optional[bool]) -> Optional[str]:
"""转换布尔值为字符串。"""
return b if b is None else str(b).lower()
|
6030b7fd88b10c4bdccd12abd1f042c518e8a03f
| 30,457 |
from typing import Set
def color_csq(all_csq: Set[str], mane_csq: Set[str]) -> str:
"""
takes the collection of all consequences, and MANE csqs
if a CSQ occurs on MANE, write in bold,
if non-MANE, write in red
return the concatenated string
NOTE: I really hate how I've implemented this
:param all_csq:
:param mane_csq:
:return: the string filling the consequence box in the HTML
"""
csq_strings = []
for csq in all_csq:
# bold, in Black
if csq in mane_csq:
csq_strings.append(STRONG_STRING.format(content=csq))
# bold, and red
else:
csq_strings.append(COLOR_STRING.format(color=COLORS['1'], content=csq))
return ', '.join(csq_strings)
|
1e6792bf446799a4c4c22770c7abfc1718c88516
| 30,458 |
def hasattrs(object, *names):
"""
Takes in an object and a variable length amount of named attributes,
and checks to see if the object has each property. If any of the
attributes are missing, this returns false.
:param object: an object that may or may not contain the listed attributes
:param names: a variable amount of attribute names to check for
:return: True if the object contains each named attribute, false otherwise
"""
for name in names:
if not hasattr(object, name):
return False
return True
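# Illustrative usage, not part of the original snippet: a complex number exposes
# 'real' and 'imag' but has no 'denominator' attribute.
assert hasattrs(1 + 2j, "real", "imag") is True
assert hasattrs(1 + 2j, "real", "denominator") is False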
|
f3a2fc308d041ed0de79e3389e30e02660a1d535
| 30,459 |
def pano_stretch_image(pano_img, kx, ky, kz):
"""
Note that this is the inverse mapping, which refers to Equation 3 in HorizonNet paper (the coordinate system in
the paper is different from here, xz needs to be swapped)
:param pano_img: a panorama image, shape must be [h,w,c]
:param kx: stretching along left-right direction
:param ky: stretching along up-down direction
:param kz: stretching along front-back direction
:return:
"""
w = pano_img.shape[1]
h = pano_img.shape[0]
sin_lon, cos_lon, tan_lat = prepare_stretch(w, h)
n_lon = np.arctan2(sin_lon * kz / kx, cos_lon)
n_lat = np.arctan(tan_lat[..., None] * np.sin(n_lon) / sin_lon * kx / ky)
n_pu = lonlat2pixel(n_lon, w=w, axis=0, need_round=False)
n_pv = lonlat2pixel(n_lat, h=h, axis=1, need_round=False)
pixel_map = np.empty((h, w, 2), dtype=np.float32)
pixel_map[..., 0] = n_pu
pixel_map[..., 1] = n_pv
map1 = pixel_map[..., 0]
map2 = pixel_map[..., 1]
# using wrap mode because it is continues at left or right of panorama
new_img = cv2.remap(pano_img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_WRAP)
return new_img
|
f5b151e3e124e3333304b8ff6c217971fb10ba35
| 30,460 |
def blue_process(infile, masterbias=None, error=False, rdnoise=None, oscan_correct=False):
"""Process a blue frame
"""
# check to make sure it is a blue file
ccd = ccdproc.CCDData.read(infile, unit=u.adu)
try:
namps = ccd.header['CCDAMPS']
except KeyError:
namps = ccd.header['CCDNAMPS']
# reduce file
try:
blueamp = [ccd.header['AMPSEC'].strip()]
if oscan_correct:
bluescan = [ccd.header['BIASSEC'].strip()]
else:
bluescan = [None]
bluetrim = [ccd.header['DATASEC'].strip()]
#ugly hack for when two amps
if namps>1: raise Exception()
except:
blueamp = ['[1:1050,:]', '[1051:2100,:]']
if oscan_correct:
bluescan = ['[1:26,:]', '[1025:1050,:]']
else:
bluescan = [None, None]
bluetrim = ['[27:1050,:]', '[1:1024,:]']
flip = True
ccd = hrs_process(infile, ampsec=blueamp, oscansec=bluescan,
trimsec=bluetrim, masterbias=masterbias, error=error,
rdnoise=None, flip=flip)
#this is in place to deal with changes from one amp to two
if namps == 1:
ccd.data = ccd.data[:, ::-1]
if (ccd.mask is not None):
ccd.mask = ccd.mask[:, ::-1]
if (ccd.uncertainty is not None):
ccd.uncertainty = ccd.uncertainty[:, ::-1]
return ccd
|
c3b251a3ae99031b54e8dc5af4d5c95511f31c75
| 30,461 |
import numpy as np
import scipy as sp
import scipy.sparse
def _rand_sparse(m, n, density, format='csr'):
"""Helper function for sprand, sprandn"""
nnz = max(min(int(m*n*density), m*n), 0)
# np.random.random_integers is deprecated; np.random.randint's high bound is exclusive
row = np.random.randint(low=0, high=m, size=nnz)
col = np.random.randint(low=0, high=n, size=nnz)
data = np.ones(nnz, dtype=float)
# duplicate (i,j) entries will be summed together
return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n))
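# Illustrative usage, not part of the original snippet: roughly 20% of the 10x10
# entries are set to 1.0 (duplicate (i, j) draws are summed together).
m = _rand_sparse(10, 10, 0.2)
print(m.shape, m.nnz)   # (10, 10) and at most 20 stored entries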
|
08221cdc9798e0ddf9266b5bf2ca3dfe21451278
| 30,462 |
import random
def _generate_trace(distance):
"""
生成轨迹
:param distance:
:return:
"""
# 初速度
v = 0
# 位移/轨迹列表,列表内的一个元素代表0.02s的位移
tracks_list = []
# 当前的位移
current = 0
while current < distance - 3:
# 加速度越小,单位时间的位移越小,模拟的轨迹就越多越详细
a = random.randint(10000, 12000) # 加速运动
# 初速度
v0 = v
t = random.randint(9, 18)
s = v0 * t / 1000 + 0.5 * a * ((t / 1000) ** 2)
# 当前的位置
current += s
# 速度已经达到v,该速度作为下次的初速度
v = v0 + a * t / 1000
# 添加到轨迹列表
if current < distance:
tracks_list.append(round(current))
# 减速慢慢滑
if round(current) < distance:
for i in range(round(current) + 1, distance + 1):
tracks_list.append(i)
else:
for i in range(tracks_list[-1] + 1, distance + 1):
tracks_list.append(i)
y_list = []
zy = 0
for j in range(len(tracks_list)):
y = random.choice(
[0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
-1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0])
zy += y
y_list.append(zy)
j += 1
base_y = str(-random.randint(330, 350))
trace = [['0', base_y], ['0', base_y], ['0', base_y]]
for index, x in enumerate(tracks_list):
trace.append([str(x), str(y_list[index])])
t_last = trace[-1]
for _ in range(random.randint(4, 6)):
trace.append(t_last)
return trace
|
6ce298de2a1977c7662f83346e7554c546b41131
| 30,464 |
import sqlite3
def update_nt_uid_acc(cachepath, uid, accession):
"""Update nt UID GenBank accession."""
# Path must be string, not PosixPath, in Py3.6
conn = sqlite3.connect(str(cachepath))
results = []
with conn:
cur = conn.cursor()
cur.execute(SQL_UPDATE_UID_ACC, (accession, uid))
results.append(cur.fetchone())
return results
|
45c9514ffeca9281269c8f27ac09b3b863de2eff
| 30,465 |
def bool_env(env_val):
""" check for boolean values """
if env_val:
if env_val in TRUE_LIST:
return True
if env_val in FALSE_LIST:
return False
# print("Return:%s" % env_val)
return env_val
else:
if env_val in FALSE_LIST:
return False
# print("Returning:%s" % env_val)
return
|
afb9d2f35c6469a1fd6f32250376b94a05db1de0
| 30,466 |
import json
def try_parse_json(json_):
"""Converts the string representation of JSON to JSON.
:param str json_: JSON in str representation.
:rtype: :class:`dict` if converted successfully, otherwise False.
"""
if not json_:
return False
try:
return json.loads(json_)
except ValueError:
return False
|
077819cf82e307aacf3e56b11fbba26a79559968
| 30,467 |
def svn_ra_get_file(*args):
"""
svn_ra_get_file(svn_ra_session_t session, char path, svn_revnum_t revision,
svn_stream_t stream, apr_pool_t pool) -> svn_error_t
"""
return _ra.svn_ra_get_file(*args)
|
2e887ace5ed3538ac7f3e401be9fa71ddfc100cc
| 30,468 |
def version_microservices(full=True):
"""
Display Zoomdata microservice packages version.
CLI Example:
full : True
Return full version. If set False, return only short version (X.Y.Z).
.. code-block:: bash
salt '*' zoomdata.version_microservices
"""
ms_version = ''
ms_pkgs = list_pkgs_microservices()
for pkg in ms_pkgs:
# pylint: disable=undefined-variable
ms_version = __salt__['pkg.version'](pkg)
if not full:
return ms_version.split('-')[0]
break
return ms_version
|
aff0ece640e33e2d880c3e4a13ad7e13911aaa68
| 30,469 |
def KGCOVID19(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "current", **kwargs
) -> Graph:
"""Return kg-covid-19 graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "current"
Version to retrieve
The available versions are:
- 20200925
- 20200927
- 20200929
- 20201001
- 20201012
- 20201101
- 20201202
- 20210101
- 20210128
- 20210201
- 20210218
- 20210301
- 20210412
- 20210725
- 20210726
- 20210727
- 20210823
- 20210902
- 20211002
- 20211102
- 20211202
- 20220102
- 20220202
- 20220217
- 20220223
- 20220225
- 20220228
- 20220328
- 20220330
- 20220402
- 20220502
- current
References
----------
Please cite:
```bib
@article{reese2021kg,
title={KG-COVID-19: a framework to produce customized knowledge graphs for COVID-19 response},
author={Reese, Justin T and Unni, Deepak and Callahan, Tiffany J and Cappelletti, Luca and Ravanmehr, Vida and Carbon, Seth and Shefchek, Kent A and Good, Benjamin M and Balhoff, James P and Fontana, Tommaso and others},
journal={Patterns},
volume={2},
number={1},
pages={100155},
year={2021},
publisher={Elsevier}
}
```
"""
return AutomaticallyRetrievedGraph(
"KGCOVID19", version, "kghub", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
|
24e419d80cd9634f5dab352f37db1fd6a19661d2
| 30,473 |
def is_gh_online():
"""
Check if GitHub is online.
The different services of GitHub are running in seperat services
and thus just being GitHub online does not mean,
that required parts are online.
"""
return _is_online("github.com", "/", 200, "OK")
|
8c5dc7090f9d851e5b5c303ffa376da5f926202a
| 30,475 |
import math
from functools import partial
from multiprocessing import Pool
def get_items_with_pool(
source_key: str, count: int, start_index: int = 0, workers: int = 4
) -> Items:
"""Concurrently reads items from API using Pool
Args:
source_key: a job or collection key, e.g. '112358/13/21'
count: a number of items to retrieve
start_index: an index to read from
workers: the number of separate processors to get data in
Returns:
A list of items
"""
active_connections_limit = 10
processes_count = min(max(helpers.cpus_count(), workers), active_connections_limit)
batch_size = math.ceil(count / processes_count)
items = []
with Pool(processes_count) as p:
results = p.starmap(
partial(get_items, source_key, batch_size, child=True),
zip([i for i in range(start_index, start_index + count, batch_size)]),
)
for items_batch in results:
items.extend(items_batch)
return items
|
cbf07015872fc72bfa0e70be5c6a4553e5bef363
| 30,476 |
from typing import Tuple
import numpy as np
import pandas as pd
def calc_portfolio_holdings(initial_investment: int, weights: pd.DataFrame, prices: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Calculate the initial portfolio holdings given an amount of cash to invest.
:param initial_investment: The initial investment used to purchase the portfolio (no partial shares)
:param weights: a data frame containing the the weights for each asset as symbol: fraction
:param prices: the share prices
:return: the dollar value of the share holdings and the number of shares
"""
weights_np: np.array = np.zeros(weights.shape[1])
prices_np: np.array = np.zeros(weights.shape[1])
for ix, col in enumerate(weights.columns):
weights_np[ix] = weights[col]
prices_np[ix] = prices[col]
budget_np = weights_np * float(initial_investment)
shares = budget_np // prices_np
holdings = shares * prices_np
holdings_df: pd.DataFrame = pd.DataFrame(holdings).transpose()
holdings_df.columns = weights.columns
shares_df: pd.DataFrame = pd.DataFrame(shares).transpose()
shares_df.columns = weights.columns
return holdings_df, shares_df
|
7adf46894b27c679eec16f94a80bcad3c7539c88
| 30,478 |
def ford_fulkerson(G, s, t, capacity='capacity'):
"""Find a maximum single-commodity flow using the Ford-Fulkerson
algorithm.
This is the legacy implementation of maximum flow. See Notes below.
This algorithm uses Edmonds-Karp-Dinitz path selection rule which
guarantees a running time of `O(nm^2)` for `n` nodes and `m` edges.
Parameters
----------
G : NetworkX graph
Edges of the graph are expected to have an attribute called
'capacity'. If this attribute is not present, the edge is
considered to have infinite capacity.
s : node
Source node for the flow.
t : node
Sink node for the flow.
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
Returns
-------
R : NetworkX DiGraph
The residual network after computing the maximum flow. This is a
legacy implementation, see Notes and Examples.
Raises
------
NetworkXError
The algorithm does not support MultiGraph and MultiDiGraph. If
the input graph is an instance of one of these two classes, a
NetworkXError is raised.
NetworkXUnbounded
If the graph has a path of infinite capacity, the value of a
feasible flow on the graph is unbounded above and the function
raises a NetworkXUnbounded.
See also
--------
:meth:`maximum_flow`
:meth:`minimum_cut`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
Notes
-----
This is a legacy implementation of maximum flow (before 1.9).
This function used to return a tuple with the flow value and the
flow dictionary. Now it returns the residual network resulting
after computing the maximum flow, in order to follow the new
interface to flow algorithms introduced in NetworkX 1.9.
Note however that the residual network returned by this function
does not follow the conventions for residual networks used by the
new algorithms introduced in 1.9. This residual network has edges
with capacity equal to the capacity of the edge in the original
network minus the flow that went through that edge. A dictionary
with infinite capacity edges can be found as an attribute of the
residual network.
Examples
--------
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_edge('x','a', capacity=3.0)
>>> G.add_edge('x','b', capacity=1.0)
>>> G.add_edge('a','c', capacity=3.0)
>>> G.add_edge('b','c', capacity=5.0)
>>> G.add_edge('b','d', capacity=4.0)
>>> G.add_edge('d','e', capacity=2.0)
>>> G.add_edge('c','y', capacity=2.0)
>>> G.add_edge('e','y', capacity=3.0)
This function returns the residual network after computing the
maximum flow. This network has graph attributes that contain:
a dictionary with edges with infinite capacity flows, the flow
value, and a dictionary of flows:
>>> R = nx.ford_fulkerson(G, 'x', 'y')
>>> # A dictionary with infinite capacity flows can be found as an
>>> # attribute of the residual network
>>> inf_capacity_flows = R.graph['inf_capacity_flows']
>>> # There are also attributes for the flow value and the flow dict
>>> flow_value = R.graph['flow_value']
>>> flow_dict = R.graph['flow_dict']
You can use the interface to flow algorithms introduced in 1.9 to get
the output that the function ford_fulkerson used to produce:
>>> flow_value, flow_dict = nx.maximum_flow(G, 'x', 'y',
... flow_func=nx.ford_fulkerson)
"""
flow_value, R = ford_fulkerson_impl(G, s, t, capacity=capacity)
flow_dict = _create_flow_dict(G, R, capacity=capacity)
R.graph['flow_value'] = flow_value
R.graph['flow_dict'] = flow_dict
R.graph['algorithm'] = 'ford_fulkerson_legacy'
return R
|
dd8e8e351829feb98cd144f4148f8c9bf62cb239
| 30,479 |
def index():
"""
Show the main page of Stream4Flow
:return: Empty dictionary
"""
# Do not save the session
session.forget(response)
return dict()
|
531c10ca17406086fcf14a5dfcdcecce5cc60119
| 30,480 |
from typing import Type
def create_temporary_table_sql(model: Type[Model]) -> str:
"""
Get the SQL required to represent the given model in the database as a
temporary table.
We cache the results as this will be called for each request, but the model
should never change (outside of tests), so we can use a very small cache.
"""
# Need to use _meta, so disable protected property access checks
# pylint: disable=protected-access
# For each field, generate the required SQL to add that field to the table
definition = ", ".join(
_column_sql(field)
for field in model._meta.get_fields() # noqa
if isinstance(field, Field)
)
sql = f'CREATE TEMPORARY TABLE "{model._meta.db_table}" ({definition})'
return sql
|
3074b4e6bb0c9147faa9da3243d0d66a3fee7517
| 30,481 |
def field_paths(h5, key='externalFieldPath'):
"""
Looks for the External Fields
"""
if key not in h5.attrs:
return []
fpath = h5.attrs[key].decode('utf-8')
if '%T' not in fpath:
return [fpath]
path1 = fpath.split('%T')[0]
tlist = list(h5[path1])
paths = [path1+t for t in tlist]
return paths
|
578e1a2d0971a94afa665f368e9b72c8f6e449d3
| 30,482 |
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Return the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
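# Illustrative usage, not part of the original snippet: the input iterator yields
# (char, escaped) pairs for the text following the opening character.
it = iter([(c, False) for c in "2,3}x"])
print(get_quantifier("{", it))   # -> (2, 'x')
it = iter([(c, False) for c in "abc"])
print(get_quantifier("+", it))   # -> (1, 'a')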
|
36dea445aa416be79e86bb1e7c6f9dbe454c6c2a
| 30,483 |
from typing import List
def get_defined_vars(
operation: "OperationDefinitionNode",
) -> List["VariableNode"]:
"""
Retrieve a list of VariableNode defined inside the variableDefinitionNode list of an OperationDefinitionNode
:param operation: the operation definition node to look through
:type operation: "OperationDefinitionNode"
:return: The list of VariableNodes defined in the VariableDefinitionNode list of the given OperationDefinitionNode
:rtype: List["VariableNode"]
"""
return [x.variable for x in operation.variable_definitions]
|
f39cd2b205bdfba5347884e9b675949e08877a5f
| 30,484 |
def snv(img):
"""
standard normal variates (SNV) transformation of spectral data
"""
mean = np.mean(img, axis=0)
std = np.std(img, axis=0)
return (img - mean[np.newaxis, ...])/std[np.newaxis, ...]
|
63c549f1e319ab4cc4b4beb4ea602b136d71168f
| 30,485 |
def fredkin(cell: int, live_count: int, neighbors: Neighbors = None) -> int:
"""\"Fredkin\" Game of Life rule
This rule can be specified using these strings:
- ``B1357/S02468``
- ``2468/1357``
- ``fredkin``
Parameters
----------
cell: int
Value of the current cell. Can be ``1`` (alive) or ``0`` (dead)
live_count: int
Count of cells alive (``1``) around the current cell.
neighbors: Iterator[Tuple[int, int, int]], optional
Iterator yielding the value, the x- and the y-coordinate of the
individual neighbors. This parameters might only be required by very few
rules and is present in every game rule for consistency.
Returns
-------
int
Computed value of the current cell. Can be ``1`` (alive) or ``0`` (dead).
Notes
-----
The value of ``live_count`` depends on the type of neighborhood you use.
PyGoL uses the Moore neighborhood by default. See the LifeWiki for more
information on types of neighborhood:
https://www.conwaylife.com/wiki/Cellular_automaton#Common_dimensions_and_neighborhoods
References
----------
Find this rule in the LifeWiki:
https://www.conwaylife.com/wiki/OCA:Replicator#Replicator_2
"""
if (live_count + cell) % 2 == 1:
return 1
return 0
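# Illustrative usage, not part of the original snippet: the next state depends on
# the parity of (live neighbours + the cell's own state).
assert fredkin(0, 1) == 1   # dead cell with one live neighbour becomes alive
assert fredkin(1, 1) == 0   # live cell with one live neighbour dies
assert fredkin(1, 2) == 1   # live cell with two live neighbours survives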
|
ef08d99fa90c6d615bf3aabd6a4fe72903ecfb62
| 30,486 |
def mad(arr):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med))
|
f02919fcb082c815602d8b57cb59d5c71cb6a219
| 30,487 |
def get_logs_directory():
"""Return path of logs directory"""
LDAModel_directory = get_LDAModel_directory()
logs_directory = LDAModel_directory / 'logs'
if not logs_directory.is_dir():
create_directory(logs_directory)
return logs_directory
|
eb37ca64a07280d55584ac0146b6babc02051b3d
| 30,488 |
def svn_ra_rev_proplist(*args):
"""
svn_ra_rev_proplist(svn_ra_session_t session, svn_revnum_t rev, apr_hash_t props,
apr_pool_t pool) -> svn_error_t
"""
return _ra.svn_ra_rev_proplist(*args)
|
2f6c1f25f6b62d1306aa746dc0dc04f39370aa0e
| 30,489 |
import datetime
def parse_date(s):
"""
Given a string matching the 'full-date' production above, returns
a datetime.date instance. Any deviation from the allowed format
will produce a raised ValueError.
>>> parse_date("2008-08-24")
datetime.date(2008, 8, 24)
>>> parse_date(" 2008-08-24 ")
datetime.date(2008, 8, 24)
>>> parse_date("2008-08-00")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "rfc3339.py", line 134, in parse_date
return datetime.date(int(y), int(m), int(d))
ValueError: day is out of range for month
>>> parse_date("2008-06-31")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "rfc3339.py", line 134, in parse_date
return datetime.date(int(y), int(m), int(d))
ValueError: day is out of range for month
>>> parse_date("2008-13-01")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "rfc3339.py", line 134, in parse_date
return datetime.date(int(y), int(m), int(d))
ValueError: month must be in 1..12
>>> parse_date("22008-01-01")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "rfc3339.py", line 136, in parse_date
raise ValueError('Invalid RFC 3339 date string', s)
ValueError: ('Invalid RFC 3339 date string', '22008-01-01')
>>> parse_date("2008-08-24").isoformat()
'2008-08-24'
"""
m = date_re.match(s)
if m:
(y, m, d) = m.groups()
return datetime.date(int(y), int(m), int(d))
else:
raise ValueError('Invalid RFC 3339 date string', s)
|
d21ab8b08d52bf155d5e8af192f19036307568b5
| 30,491 |
def setup_config(quiz_name):
"""Updates the config.toml index and dataset field with the formatted
quiz_name. This directs metapy to use the correct files
Keyword arguments:
quiz_name -- the name of the quiz
Returns:
True on success, False if the config file cannot be opened
"""
try:
conf_file = open("config.toml", 'r')
lines = conf_file.readlines()
conf_file.close()
for i in range(len(lines)):
if lines[i].startswith("index"):
lines[i] = "index = 'idx-{0}'\n".format(quiz_name.replace(" ", "_"))
if lines[i].startswith("dataset"):
lines[i] = "dataset = '{0}'\n".format(quiz_name.replace(" ", "_"))
conf_file = open("config.toml", 'w')
with conf_file:
conf_file.writelines(lines)
except Exception as e:
print(e)
return False
return True
|
28aba9399926f27da89953c8b0c6b41d95a12d96
| 30,492 |
def unitary_connection(h_pre, h_post, n_pre, n_post, X):
"""
Gives the connectivity value between the n_pre unit
in the h_pre hypercolumn and the n_post unit in the h_post column
"""
hits_pre = X[:, h_pre] == n_pre
hits_post = X[:, h_post] == n_post
return np.sum(hits_pre * hits_post)
|
d272b92abfed50d7f89c2d4b40ff3c4e5a417d5e
| 30,493 |
import numpy as np
def mase(y, y_hat, y_train, seasonality=1):
"""Calculates the M4 Mean Absolute Scaled Error.
MASE measures the relative prediction accuracy of a
forecasting method by comparing the mean absolute errors
of the prediction and the true value against the mean
absolute errors of the seasonal naive model.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
y_train: numpy array
actual train values for Naive1 predictions
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
Return
------
scalar: MASE
"""
scale = np.mean(abs(y_train[seasonality:] - y_train[:-seasonality]))
mase = np.mean(abs(y - y_hat)) / scale
mase = 100 * mase
return mase
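# Illustrative usage, not part of the original snippet: a forecast whose absolute
# error equals the naive one-step in-sample error gives a MASE of 100.
y_train = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([5.0, 6.0])
y_hat = np.array([6.0, 7.0])
print(mase(y, y_hat, y_train, seasonality=1))   # -> 100.0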
|
7373ef660ae9784ecd83a83457c143debb721685
| 30,494 |
def _resize_along_axis(inputs, size, axis, **kwargs):
""" Resize 3D input tensor to size along just one axis. """
except_axis = (axis + 1) % 3
size, _ = _calc_size_after_resize(inputs, size, axis)
output = _resize_except_axis(inputs, size, except_axis, **kwargs)
return output
|
a7cc206171ffe1cf6df22da3756cefcc6c5fcd86
| 30,495 |
def getTournamentMatches(tourneyId):
"""
Return a dictionary from match id to match data for a tournament.
"""
if tourneyId not in matchDatas:
refreshMatchIndex(tourneyId)
return matchDatas[tourneyId]
|
a6d5386e9034b126405aedee75ba36b1718c3dc9
| 30,496 |
def compute_protien_mass(protien_string):
"""
test case
>>> compute_protien_mass('SKADYEK')
821.392
"""
p={'A':'71.03711','C':'103.00919','D':'115.02694','E':'129.04259','F':'147.06841','G':'57.02146','H':'137.05891','I':'113.08406','K':'128.09496','L':'113.08406','M':'131.04049','N':'114.04293','P':'97.05276','Q':'128.05858','R':'156.10111','S':'87.03203','T':'101.04768','V':'99.06841','W':'186.07931','Y':'163.06333'}
mass=0
for x in protien_string:
mass=mass+float(p[x])
# round the mass to 3 decimal places
mass=round(mass,3)
return mass
|
86a3ffd0ce3e95fcdf6d510d2865b35aeb93d779
| 30,497 |
import logging
def find_duration(data):
"""Finds the duration of the ECG data sequence
Finds the duration by looking at the last time value
as the first value is always at time = 0 seconds
:param data: 2D array of time sequences and voltage sequences
:return: Time duration of data sequence
"""
logging.info("Detecting Duration of Data Stream...\n")
return data[:, 0][-1]
|
e65135457e23886c402e0671d720fe9c5ed257a1
| 30,498 |
def response(data, **kwd):
"""Returns a http response"""
return HttpResponse(data, **kwd)
|
8d88295751ee5f53f99ea8a134d01e1df7cb1fd5
| 30,499 |
from typing import List
def load_actions(action_file: str) -> List[str]:
"""
Load unique actions from an action file
"""
return load_uniq_lines(action_file)
|
a5ffe3ccac462bc8277da6174a5eb81071a6fb84
| 30,500 |
def ConvertFile(filename_in, filename_out, loglevel='INFO'):
"""
Converts an ANSYS input file to a python pyansys script.
Parameters
----------
filename_in : str
Filename of the ansys input file to read in.
filename_out : str
Filename of the python script to write a translation to.
Returns
-------
clines : list
List of lines translated
"""
clines = []
with open(filename_in) as file_in:
with open(filename_out, 'w') as file_out:
file_out.write('import pyansys\n')
file_out.write('ansys = pyansys.ANSYS(loglevel="%s")\n' % loglevel)
for line in file_in.readlines():
cline = ConvertLine(line)
file_out.write(cline)
clines.append(cline)
cline = 'ansys.Exit()\n'
file_out.write(cline)
clines.append(cline)
return clines
|
b044b565193ca0d78edd77706cdeb1711d95f063
| 30,502 |
from typing import Optional
def get_metadata_saml(idp_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMetadataSamlResult:
"""
Use this data source to retrieve SAML IdP metadata from Okta.
## Example Usage
```python
import pulumi
import pulumi_okta as okta
example = okta.idp.get_metadata_saml(id="<idp id>")
```
:param str idp_id: The id of the IdP to retrieve metadata for.
"""
__args__ = dict()
__args__['idpId'] = idp_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('okta:idp/getMetadataSaml:getMetadataSaml', __args__, opts=opts, typ=GetMetadataSamlResult).value
return AwaitableGetMetadataSamlResult(
assertions_signed=__ret__.assertions_signed,
authn_request_signed=__ret__.authn_request_signed,
encryption_certificate=__ret__.encryption_certificate,
entity_id=__ret__.entity_id,
http_post_binding=__ret__.http_post_binding,
http_redirect_binding=__ret__.http_redirect_binding,
id=__ret__.id,
idp_id=__ret__.idp_id,
metadata=__ret__.metadata,
signing_certificate=__ret__.signing_certificate)
|
5b4e1fdf72d7e11d7f4eee878e88e174b452cfa5
| 30,503 |
from typing import Dict
def _average_latency(row: Dict):
"""
Calculate average latency for Performance Analyzer single test
"""
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(row.get(f, 0)) for f in avg_sum_fields)
return avg_latency
|
f321cb4d55af605298225f2f0146a9a71ee7895b
| 30,504 |
def _(expr, assumptions):
"""
Integer**Integer -> !Prime
"""
if expr.is_number:
return _PrimePredicate_number(expr, assumptions)
if ask(Q.integer(expr.exp), assumptions) and \
ask(Q.integer(expr.base), assumptions):
return False
|
da1b018ef6fdb6987666806ce34ee784d03cff9b
| 30,506 |
from tqdm import tqdm
import torch
def train(model, trainLoader, optimizer, loss_function, device, trainParams):
"""
Function to train the model for one iteration. (Generally, one iteration = one epoch, but here it is one step).
It also computes the training loss, CER and WER. The CTC decode scheme is always 'greedy' here.
"""
trainingLoss = 0
trainingCER = 0
trainingWER = 0
for batch, (inputBatch, targetBatch, inputLenBatch, targetLenBatch) in enumerate(tqdm(trainLoader, leave=False, desc="Train",
ncols=75)):
inputBatch, targetBatch = (inputBatch.float()).to(device), (targetBatch.int()).to(device)
inputLenBatch, targetLenBatch = (inputLenBatch.int()).to(device), (targetLenBatch.int()).to(device)
optimizer.zero_grad()
model.train()
outputBatch = model(inputBatch)
with torch.backends.cudnn.flags(enabled=False):
loss = loss_function(outputBatch, targetBatch, inputLenBatch, targetLenBatch)
loss.backward()
optimizer.step()
trainingLoss = trainingLoss + loss.item()
predictionBatch, predictionLenBatch = ctc_greedy_decode(outputBatch.detach(), inputLenBatch, trainParams["eosIx"])
trainingCER = trainingCER + compute_cer(predictionBatch, targetBatch, predictionLenBatch, targetLenBatch)
trainingWER = trainingWER + compute_wer(predictionBatch, targetBatch, predictionLenBatch, targetLenBatch, trainParams["spaceIx"])
trainingLoss = trainingLoss/len(trainLoader)
trainingCER = trainingCER/len(trainLoader)
trainingWER = trainingWER/len(trainLoader)
return trainingLoss, trainingCER, trainingWER
|
f2726ad6f63997abd670c5f4614b1cef1e35dec7
| 30,507 |
import numpy as np
def reorder(A, B):
"""Change coefficient order from y**2 xy x**2 to x**2 xy y**2 in both A and B.
Parameters
----------
A : array
polynomial coefficients
B : array
polynomial coefficients
Returns
-------
A2, B2: numpy arrays
coefficients with changed order
"""
poly_degree = polynomial_degree(len(A))
A2 = np.zeros((len(A)))
B2 = np.zeros((len(B)))
for i in range(poly_degree + 1):
ti = i * (i + 1) // 2
for j in range(i + 1):
A2[ti + j] = A[ti + i - j]
B2[ti + j] = B[ti + i - j]
return A2, B2
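# Added illustrative sketch: for three coefficients (degree 1) each triangular row is
# reversed; this assumes the module-level polynomial_degree() helper (not shown here)
# maps a coefficient count of 3 to degree 1.
# A = np.array([1.0, 2.0, 3.0])
# reorder(A, A)  # -> (array([1., 3., 2.]), array([1., 3., 2.]))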
|
6a4740a0423bc3a804e66b0cda4a444a7f58072e
| 30,508 |
import functools
import collections
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
"""
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
of which order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
"""
missing = object()
def maybe_merge(*items):
"""
Merge any non-null items
"""
def not_missing(ob):
return ob is not missing
return functools.reduce(merge, filter(not_missing, items))
new_items = collections.OrderedDict((key(el), el) for el in new)
old_items = collections.OrderedDict((key(el), el) for el in old)
# use the old_items as a reference
for old_key, old_item in _mutable_iter(old_items):
if old_key not in new_items:
yield old_item
continue
# yield all new items that appear before the matching key
before, match_new, new_items = _swap_on_miss(partition_dict(new_items, old_key))
for new_key, new_item in before.items():
# ensure any new keys are merged with previous items if
# they exist
yield maybe_merge(new_item, old_items.pop(new_key, missing))
yield merge(old_item, match_new)
# finally, yield whatever is leftover
# yield from new_items.values()
for item in new_items.values():
yield item
|
06f37d895fd906513aa3b85fb2ff48e0f2f2b625
| 30,509 |
from typing import Tuple
from typing import List
def load_conversation(
filename: str,
dictionary: corpora.Dictionary,
with_symbol: bool=True
) -> List[Tuple[List[str], List[str]]]:
"""Load a dialogue corpus.
Args:
filename (str): corpus file.
Each line of the corpus file must contain a
(source string, separator, target string) triple whose strings are
split into space-separated words, e.g.
何 が 好き です か ?,Python が 好き です 。
dictionary:
with_symbol:
Returns:
List[Tuple[List[str], List[str]]]: list of (source tokens, target tokens) pairs
"""
# load conversation sentences
if with_symbol:
tokens = [(
# [config.START_SYMBOL] + src.split() + [config.END_SYMBOL],
# [config.START_SYMBOL] + dst.split() + [config.END_SYMBOL]
list(src.split()) + [config.END_SYMBOL],
[config.END_SYMBOL] + dst.split() + [config.END_SYMBOL]
) for src, dst in (sent.split(config.SEPARATOR)
for sent in open(filename))
]
else:
tokens = [
(list(src.split()), dst.split())
for src, dst in (sent.split(config.SEPARATOR)
for sent in open(filename))
]
print("loaded sentences from {}".format(filename))
return tokens
|
fb7aec9ea228fe528d6744564c1a2b298a33575c
| 30,510 |
def close_incons_reduction(incons: list):
"""
Two steps:
0. work under the same backends pair;
1. for the same input, choose the largest (done before this call);
2. for different inputs within a small distance, do not update; keep only the
first occurrence and record the relation.
"""
def is_duplicate(t: tuple, li: list):
"""unique inconsistency"""
for l in li:
if abs(t[1] - l[1]) <= distance_threshold:
return True,l
return False,None
result = list()
relation_dict = dict()
for incon in incons:
status, l = is_duplicate(incon, result)
if not status:
result.append(incon)
else:
relation_dict[incon] = l
return result,relation_dict
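# Added illustrative sketch, assuming inconsistencies are (id, distance) tuples and a
# module-level distance_threshold of 0.1 (both assumptions, not shown in this snippet):
# incons = [("a", 0.50), ("b", 0.55), ("c", 0.90)]
# close_incons_reduction(incons)
# # -> ([("a", 0.50), ("c", 0.90)], {("b", 0.55): ("a", 0.50)})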
|
5fd581471ff361d2351b2dd8285d606399667e21
| 30,511 |
def mf2tojf2(mf2):
"""I'm going to have to recurse here"""
jf2={}
items = mf2.get("items",[])
jf2=flattenProperties(items,isOuter=True)
#print jf2
return jf2
|
399fa35f592bb6ec042003ed2884f94078ac01fd
| 30,512 |
import csv
def import_all(filename):
""" Imports file contents from the user with parameters for the neural net and later calculations.
Currently not robust to missing or incorrect arguments from the file.
Currently does not convert values to int; that is done in later functions.
inputs: filename - name of input file, currently as inputs.csv
output: file_list - list of parameters specified by user"""
with open(filename,'r') as file:
file_read = csv.reader(file)
file_list = []
for line in file_read:
file_list.append(line)
print "Imported file: " + str(filename)
return file_list
|
ab5b2fecb6cadd2754d52cc9333d110955ca10c7
| 30,513 |
from typing import Union
from pathlib import Path
from typing import Dict
from typing import Tuple
from typing import Any
import numpy as np
def fill_database(path: Union[str, Path], settings: SettingsConfig,
inputs: MeasurementInputs, alchemy: Alchemy,
parent_location_id: int, sex_id: int, child_prior: Dict[str, Dict[str, np.ndarray]],
mulcov_prior: Dict[Tuple[str, str, str], _Prior],
options: Dict[str, Any]) -> DismodFiller:
"""
Fill a DisMod database at the specified path with the inputs, model, and settings
specified, for a specific parent and sex ID, with options to override the priors.
"""
df = DismodFiller(
path=path, settings_configuration=settings, measurement_inputs=inputs,
grid_alchemy=alchemy, parent_location_id=parent_location_id, sex_id=sex_id,
child_prior=child_prior, mulcov_prior=mulcov_prior,
)
df.fill_for_parent_child(**options)
return df
|
39836b397a7cf384bf3ca493914f9d024baf9f6f
| 30,514 |
import datetime as dt
from math import modf
def ydhms2dt(year,doy,hh,mm,ss):
"""
ydhms2dt Take a year, day-of-year, etc and convert it into a date time object
Usage: dto = ydhms2dt(year,day,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24)
mm - 2 digit, or less int,(0 <= ss < 60)
ss - float
Output: 'dto' a date time object
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
ms,sec = modf(float(ss))
ms = ms * 10e5
dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
return dto
|
3d8fd1a6086f3dd35c80c2d862e820b7aecc5e5b
| 30,515 |
def create_test_db(verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# If the database backend wants to create the test DB itself, let it
creation_module = get_creation_module()
if hasattr(creation_module, "create_test_db"):
creation_module.create_test_db(settings, connection, verbosity, autoclobber)
return
if verbosity >= 1:
print "Creating test database..."
# If we're using SQLite, it's more convenient to test against an
# in-memory database. Using the TEST_DATABASE_NAME setting you can still choose
# to run on a physical database.
if settings.DATABASE_ENGINE == "sqlite3":
if settings.TEST_DATABASE_NAME and settings.TEST_DATABASE_NAME != ":memory:":
TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
# Erase the old test database
if verbosity >= 1:
print "Destroying old test database..."
if os.access(TEST_DATABASE_NAME, os.F_OK):
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % TEST_DATABASE_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database..."
os.remove(TEST_DATABASE_NAME)
except Exception, e:
sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
if verbosity >= 1:
print "Creating test database..."
else:
TEST_DATABASE_NAME = ":memory:"
else:
suffix = {
'postgresql': get_postgresql_create_suffix,
'postgresql_psycopg2': get_postgresql_create_suffix,
'mysql': get_mysql_create_suffix,
'mysql_old': get_mysql_create_suffix,
}.get(settings.DATABASE_ENGINE, lambda: '')()
if settings.TEST_DATABASE_NAME:
TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
else:
TEST_DATABASE_NAME = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
qn = connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = connection.cursor()
_set_autocommit(connection)
try:
cursor.execute("CREATE DATABASE %s %s" % (qn(TEST_DATABASE_NAME), suffix))
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % TEST_DATABASE_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database..."
cursor.execute("DROP DATABASE %s" % qn(TEST_DATABASE_NAME))
if verbosity >= 1:
print "Creating test database..."
cursor.execute("CREATE DATABASE %s %s" % (qn(TEST_DATABASE_NAME), suffix))
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
connection.close()
settings.DATABASE_NAME = TEST_DATABASE_NAME
call_command('syncdb', verbosity=verbosity, interactive=False)
if settings.CACHE_BACKEND.startswith('db://'):
cache_name = settings.CACHE_BACKEND[len('db://'):]
call_command('createcachetable', cache_name)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = connection.cursor()
return TEST_DATABASE_NAME
|
87659029e01399f6d46780dbd3e2809bc809b70c
| 30,516 |
import ctypes as _ctypes
def key_import(data, key_type=KEY_TYPE.SYMMETRIC, password=b''):
"""Imports a key or key generation parameters."""
key = _ctypes.c_void_p()
_lib.yaca_key_import(key_type.value, _ctypes.c_char_p(password),
data, len(data), _ctypes.byref(key))
return Key(key)
|
9cbe8dfcab3e854b096a5628c6be52d6873e8ca1
| 30,517 |
def test_training_arguments_timestamp(monkeypatch, grim_config):
"""Test TrainingWrapperArguments correctly applies a timestamp."""
def mock_return():
return '2019-06-29_17-13-41'
monkeypatch.setattr(grimagents.common, "get_timestamp", mock_return)
grim_config['--timestamp'] = True
arguments = TrainingWrapperArguments(grim_config)
arguments_string = arguments.get_arguments_as_string()
assert '--run-id 3DBall-2019-06-29_17-13-41' in arguments_string
|
391b2fdbf716bfdc22f8449a3692679d6018f200
| 30,518 |
def to_vsizip(zipfn, relpth):
""" Create path from zip file """
return "/vsizip/{}/{}".format(zipfn, relpth)
|
6f5baf380bd7ab8a4ea92111efbc0f660b10f6f8
| 30,519 |
import numpy as np
def flw3i8e(ex, ey, ez, ep, D, eq=None):
"""
Compute element stiffness (conductivity)
matrix for 8 node isoparametric field element.
Parameters:
ex = [x1,x2,x3,...,x8]
ey = [y1,y2,y3,...,y8] element coordinates
ez = [z1,z2,z3,...,z8]
ep = [ir] Ir: Integration rule
D = [[kxx,kxy,kxz],
[kyx,kyy,kyz],
[kzx,kzy,kzz]] constitutive matrix
eq heat supply per unit volume
Output:
Ke element 'stiffness' matrix (8 x 8)
fe element load vector (8 x 1)
"""
ir = ep[0]
ngp = ir*ir*ir
if eq == None:
q = 0
else:
q = eq
if ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-1, -1, -1],
[1, -1, -1],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, 1],
[1, -1, 1],
[1, 1, 1],
[-1, 1, 1]
])*g1
w = np.mat(np.ones((8, 3)))*w1
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat(np.zeros((27, 3)))
w = np.mat(np.zeros((27, 3)))
I1 = np.array([-1, 0, 1, -1, 0, 1, -1, 0, 1])
I2 = np.array([0, -1, 0, 0, 1, 0, 0, 1, 0])
gp[:, 0] = np.mat([I1, I1, I1]).reshape(27, 1)*g1
gp[:, 0] = np.mat([I2, I2, I2]).reshape(27, 1)*g2+gp[:, 0]
I1 = abs(I1)
I2 = abs(I2)
w[:, 0] = np.mat([I1, I1, I1]).reshape(27, 1)*w1
w[:, 0] = np.mat([I2, I2, I2]).reshape(27, 1)*w2+w[:, 0]
I1 = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1])
I2 = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0])
gp[:, 1] = np.mat([I1, I1, I1]).reshape(27, 1)*g1
gp[:, 1] = np.mat([I2, I2, I2]).reshape(27, 1)*g2+gp[:, 1]
I1 = abs(I1)
I2 = abs(I2)
w[:, 1] = np.mat([I1, I1, I1]).reshape(27, 1)*w1
w[:, 1] = np.mat([I2, I2, I2]).reshape(27, 1)*w2+w[:, 1]
I1 = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1])
I2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
I3 = abs(I1)
gp[:, 2] = np.mat([I1, I2, I3]).reshape(27, 1)*g1
gp[:, 2] = np.mat([I2, I3, I2]).reshape(27, 1)*g2+gp[:, 2]
w[:, 2] = np.mat([I3, I2, I3]).reshape(27, 1)*w1
w[:, 2] = np.mat([I2, I3, I2]).reshape(27, 1)*w2+w[:, 2]
else:
info("Used number of integration points not implemented")
return
wp = np.multiply(np.multiply(w[:, 0], w[:, 1]), w[:, 2])
xsi = gp[:, 0]
eta = gp[:, 1]
zet = gp[:, 2]
r2 = ngp*3
N = np.multiply(np.multiply((1-xsi), (1-eta)), (1-zet))/8.
N = np.append(N, np.multiply(np.multiply(
(1+xsi), (1-eta)), (1-zet))/8., axis=1)
N = np.append(N, np.multiply(np.multiply(
(1+xsi), (1+eta)), (1-zet))/8., axis=1)
N = np.append(N, np.multiply(np.multiply(
(1-xsi), (1+eta)), (1-zet))/8., axis=1)
N = np.append(N, np.multiply(np.multiply(
(1-xsi), (1-eta)), (1+zet))/8., axis=1)
N = np.append(N, np.multiply(np.multiply(
(1+xsi), (1-eta)), (1+zet))/8., axis=1)
N = np.append(N, np.multiply(np.multiply(
(1+xsi), (1+eta)), (1+zet))/8., axis=1)
N = np.append(N, np.multiply(np.multiply(
(1-xsi), (1+eta)), (1+zet))/8., axis=1)
dNr = np.mat(np.zeros((r2, 8)))
dNr[0:r2:3, 0] = np.multiply(-(1-eta), (1-zet))
dNr[0:r2:3, 1] = np.multiply((1-eta), (1-zet))
dNr[0:r2:3, 2] = np.multiply((1+eta), (1-zet))
dNr[0:r2:3, 3] = np.multiply(-(1+eta), (1-zet))
dNr[0:r2:3, 4] = np.multiply(-(1-eta), (1+zet))
dNr[0:r2:3, 5] = np.multiply((1-eta), (1+zet))
dNr[0:r2:3, 6] = np.multiply((1+eta), (1+zet))
dNr[0:r2:3, 7] = np.multiply(-(1+eta), (1+zet))
dNr[1:r2+1:3, 0] = np.multiply(-(1-xsi), (1-zet))
dNr[1:r2+1:3, 1] = np.multiply(-(1+xsi), (1-zet))
dNr[1:r2+1:3, 2] = np.multiply((1+xsi), (1-zet))
dNr[1:r2+1:3, 3] = np.multiply((1-xsi), (1-zet))
dNr[1:r2+1:3, 4] = np.multiply(-(1-xsi), (1+zet))
dNr[1:r2+1:3, 5] = np.multiply(-(1+xsi), (1+zet))
dNr[1:r2+1:3, 6] = np.multiply((1+xsi), (1+zet))
dNr[1:r2+1:3, 7] = np.multiply((1-xsi), (1+zet))
dNr[2:r2+2:3, 0] = np.multiply(-(1-xsi), (1-eta))
dNr[2:r2+2:3, 1] = np.multiply(-(1+xsi), (1-eta))
dNr[2:r2+2:3, 2] = np.multiply(-(1+xsi), (1+eta))
dNr[2:r2+2:3, 3] = np.multiply(-(1-xsi), (1+eta))
dNr[2:r2+2:3, 4] = np.multiply((1-xsi), (1-eta))
dNr[2:r2+2:3, 5] = np.multiply((1+xsi), (1-eta))
dNr[2:r2+2:3, 6] = np.multiply((1+xsi), (1+eta))
dNr[2:r2+2:3, 7] = np.multiply((1-xsi), (1+eta))
dNr = dNr/8.
Ke1 = np.mat(np.zeros((8, 8)))
fe1 = np.mat(np.zeros((8, 1)))
JT = dNr*np.mat([ex, ey, ez]).T
for i in range(ngp):
indx = np.array([3*(i+1)-2, 3*(i+1)-1, 3*(i+1)])
detJ = np.linalg.det(JT[indx-1, :])
if detJ < 10*np.finfo(float).eps:
info("Jacobi determinant == 0")
JTinv = np.linalg.inv(JT[indx-1, :])
B = JTinv*dNr[indx-1, :]
Ke1 = Ke1+B.T*D*B*detJ*np.asscalar(wp[i])
fe1 = fe1+N[i, :].T*detJ*wp[i]
if eq != None:
return Ke1, fe1*q
else:
return Ke1
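# Added illustrative sketch: conductivity matrix of a unit-cube element with unit
# isotropic conductivity and a 2x2x2 Gauss rule; node coordinates follow the same
# ordering as the shape functions above.
# ex = np.array([0., 1., 1., 0., 0., 1., 1., 0.])
# ey = np.array([0., 0., 1., 1., 0., 0., 1., 1.])
# ez = np.array([0., 0., 0., 0., 1., 1., 1., 1.])
# Ke = flw3i8e(ex, ey, ez, [2], np.eye(3))  # 8 x 8 'stiffness' matrix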
|
90377f5ba6205e0f3bf1bc4f1fa0b84a4c69eec9
| 30,522 |
import netaddr
def cidr_to_netmask(value):
"""
Converts a CIDR prefix-length to a network mask.
Examples:
>>> "{{ '24'|cidr_to_netmask }}" -> "255.255.255.0"
"""
return str(netaddr.IPNetwork("1.1.1.1/{}".format(value)).netmask)
|
232f4fb65be712bfb040d75ada40ed0450d84e2d
| 30,523 |
def rgb_to_name(rgb_triplet: IntTuple, spec: str = CSS3) -> str:
"""
Convert a 3-tuple of integers, suitable for use in an ``rgb()``
color triplet, to its corresponding normalized color name, if any
such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used. The default is
CSS3.
If there is no matching name, ``ValueError`` is raised.
"""
return hex_to_name(rgb_to_hex(normalize_integer_triplet(rgb_triplet)), spec=spec)
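# Added illustrative sketch of the expected behaviour for CSS3 names:
# rgb_to_name((255, 255, 255))  # -> 'white'
# rgb_to_name((1, 2, 3))        # raises ValueError (no matching name)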
|
fdce9304c4d16d348fe37a920ae7cf44f3c3f56b
| 30,524 |
import numpy as np
def get_rr_Lix(N, Fmat, psd, x):
"""
Given a rank-reduced decomposition of the Cholesky factor L, calculate
L^{-1}x where x is some vector. This way, we don't have to build L, which
saves memory and computational time.
@param N: Vector with the elements of the diagonal matrix N
@param Fmat: (n x m) matrix consisting of the reduced rank basis
@param psd: PSD of the rank-reduced approximation
@param x: Vector we want to process as Lx
@return L^{-1}x
"""
n = N.shape[0]
m = Fmat.shape[1]
y = np.zeros(n)
t = np.zeros(m)
Z, B, D = get_rr_cholesky_rep(N, Fmat, psd)
BD = (B.T * np.sqrt(D)).T
for ii in range(n):
y[ii] = (x[ii] - np.dot(Z[ii,:], t)) / np.sqrt(D[ii])
t = t + y[ii] * BD[ii,:]
return y
|
3658342296f18f3afdedf2cc790e1d0062e6c49d
| 30,525 |
def ParameterSet_Create(*args):
"""
Create() -> ParameterSet
ParameterSet_Create(std::string const & publicID) -> ParameterSet
"""
return _DataModel.ParameterSet_Create(*args)
|
c6d6ef68505119b1b146d2be267a416341e09271
| 30,526 |
def generate_html_from_cli_args(cli_dict_for_command):
"""
Turn the dict into an html representation of the cli args and options.
:param cli_dict_for_command:
:return str:
"""
# def arg_md(opt, long_opt, default, help):
# return f"*) {opt}, {long_opt}, {help}\n"
text = ""
# eval_output = ""
if "usage" in cli_dict_for_command:
text += "\n<em>usage: " + str(cli_dict_for_command["usage"]) + "</em><br>\n"
if "epilog" in cli_dict_for_command:
pass
# text += "\n" + cli_dict_for_command["epilog"]
if "args" in cli_dict_for_command:
# text += "\n\n<h4>Command Line Options</h4>\n"
text += "<ul>\n"
for arg in cli_dict_for_command["args"]:
text += f"<li>{arg[0]}</li>\n"
# eval_cmd = f"arg_md({arg[0]})"
# eval_output = eval(eval_cmd) + "\n"
# print("EVAL_OUT: " + eval_output)
text += "</ul>\n"
# eval(eval_text)
return "\n\n" + text
|
15c3b6f98141ab989cbe229a2b30778d5c664c9a
| 30,527 |
def conical_sigma_Mach_walldeflection(Mach, deflection, gamma=defg._gamma):
"""computes shock angle sigma from upstream Mach number and wall deflection
Args:
Mach: upstream Mach number
deflection: wall deflection angle
gamma: specific heat ratio (Default value = defg._gamma)
Returns:
shock angle sigma
"""
def local_def(sig):
"""internal wrapping function to iterative solve
"""
return conical_deflection_Mach_sigma(Mach, sig, gamma)
return ITS.secant_solve(local_def, deflection, degree.asin(1./Mach)+deflection)
|
97fc3ac999f4a598860dcf25b4fd537bfcaf9326
| 30,528 |
def get_hashrate_info(results, miner, algo):
"""
Get Hashrate Information for a particular Miner and Algo
Returns:
dict
"""
# do the lookup
hashrate_info = results.get_hashrate_info(miner, algo)
if hashrate_info is None:
logger.warning("Model/Algo combination does not exist for "
"miner model '{}' and algo '{}'".format(miner.model.model, algo))
return hashrate_info
|
e94d0d7345a54181a9d5547afd1209419a92b497
| 30,529 |
import hashlib
import frappe
def verify_verification_code(doctype, document_name, verification_code):
"""This method verifies the user verification code by fetching the originally sent code by the system from the cache.
Args:
doctype (str): Name of the DocType.
document_name (str): Name of the document of the DocType.
verification_code (int): User verification code
Returns:
boolean: True/False upon verification code being verified.
"""
try:
employee_user_email = frappe.session.user
cache_search_key = hashlib.md5((employee_user_email + doctype + document_name).encode('utf-8')).hexdigest()
verification_hash = hashlib.md5((employee_user_email + doctype + document_name + str(verification_code)).encode('utf-8')).hexdigest()
if not frappe.cache().get(cache_search_key):
return False
if verification_hash != frappe.cache().get(cache_search_key).decode('utf-8'):
return False
frappe.cache().delete(cache_search_key)
return True
except Exception as e:
frappe.throw(e)
|
4013d2c2ff3eb318c16af8d43ca8b50af53379ea
| 30,530 |
import numpy as np
def orient1(ppos, apos, bpos):
"""
ORIENT1 returns the orientation of PP wrt. the line [PA, PB].
"""
#---------------------------------------------- calc. det(S)
smat = np.empty(
(2, 2, ppos.shape[0]), dtype=ppos.dtype)
smat[0, 0, :] = \
apos[:, 0] - ppos[:, 0]
smat[0, 1, :] = \
apos[:, 1] - ppos[:, 1]
smat[1, 0, :] = \
bpos[:, 0] - ppos[:, 0]
smat[1, 1, :] = \
bpos[:, 1] - ppos[:, 1]
sign = \
smat[0, 0, :] * smat[1, 1, :] - \
smat[0, 1, :] * smat[1, 0, :]
return np.reshape(sign, (sign.size))
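# Added illustrative sketch: P at the origin with PA on the +x axis and PB on the
# +y axis gives a positive (counter-clockwise) orientation.
_p = np.array([[0.0, 0.0]])
_a = np.array([[1.0, 0.0]])
_b = np.array([[0.0, 1.0]])
assert orient1(_p, _a, _b)[0] > 0.0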
|
705a27bde14c31262471b5d6a3621a695d8c091d
| 30,531 |
import numpy as np
import rasterio as rio
def get_storm_data(storm_path):
""" Obtain raster grid of the storm with rasterio
Arguments:
*storm_path* (string) -- path to location of storm
"""
with rio.open(storm_path) as src:
# Read as numpy array
array = src.read(1)
array = np.array(array,dtype='float32')
affine_storm = src.affine
return array,affine_storm
|
6db69cbd6970da467021a186f3062beaa6ed7387
| 30,532 |
import functools
def wrap_with_spectral_norm(module_class,
sn_kwargs=None,
pow_iter_collection=None):
"""Returns a constructor for the inner class with spectral normalization.
This function accepts a Sonnet AbstractModule class as argument (the class,
*not* an instance of that class) alongside an optional dictionary of keyword
arguments for the spectral_norm function, and returns a constructor which can
be treated identically to the constructor of the input class, but with
spectral normalization applied to the weights created by the class.
Internally, this is just a partially evaluated SpectralNormWrapper module.
`pow_iter_collection`, if not None, is treated as the name of a TensorFlow
global collection. Each time the module's weight matrix is accessed ops are
built for performing one step of power iteration to approximate that weight's
first singular value, and ops are created for saving this new approximation in
an internal variable. At build-time the resulting object takes a special
boolean 'enable_power_iteration' keyword argument. If this is True (the
default), a control dependency on the operation for updating this internal
variable is attached to the returned weight. Otherwise, the update is *not*
attached as a control dependency, but an op is placed into the
`pow_iter_collection` global collection which causes the internal variable to
be updated. It is then up to the user to choose whether to run this update.
Args:
module_class: A constructor/class reference for a Sonnet module you would
like to wrap and automatically apply spectral normalization.
sn_kwargs: Keyword arguments to be passed to the spectral_norm function
in addition to the weight tensor.
pow_iter_collection: The name of a global collection for potentially
storing ops for updating internal variables.
Returns:
An snt.AbstractModule class representing the original with spectral norm.
"""
sn_kwargs = sn_kwargs or {}
return functools.partial(
SpectralNormWrapper, module_class, sn_kwargs, pow_iter_collection)
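# Added illustrative sketch (snt.Linear and the 'eps' keyword are assumptions, not
# confirmed by this snippet): the returned constructor is used like the wrapped class.
# SpectralNormedLinear = wrap_with_spectral_norm(snt.Linear, sn_kwargs={'eps': 1e-4})
# layer = SpectralNormedLinear(output_size=128)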
|
5c849f2ee4dd8cd818ff7bebfb0564857bbb18de
| 30,533 |