content (string, lengths 35-762k) | sha1 (string, lengths 40-40) | id (int64, 0-3.66M) |
---|---|---|
from sentry.plugins import plugins
def should_process(data):
"""Quick check if processing is needed at all."""
for plugin in plugins.all(version=2):
processors = safe_execute(
plugin.get_event_preprocessors, data=data, _with_transaction=False
)
if processors:
return True
if should_process_for_stacktraces(data):
return True
return False | 8e6f013d54ac1e3a0b77f8969a3700c45efdc673 | 23,428 |
from typing import Tuple
from typing import List
import gzip
# Assumed Biopython (< 1.78, where Bio.Alphabet still exists) for the names used below.
from Bio import SeqIO
from Bio.Alphabet import IUPAC, HasStopCodon, _verify_alphabet
def load_fasta_file(input_file: str) -> Tuple[str, List]:
"""
Load a fasta file into a list of SeqRecords.
:param input_file: The path to the input fasta file.
:returns: A tuple of the sequence type ('protein' or 'dna'), and the list of SeqRecords.
"""
if _is_gzipped(input_file):
openfunc = gzip.open
bit = 'rt'
else:
openfunc = open
bit = 'r'
with openfunc(input_file, bit) as handle:
seqs = [x.upper() for x in SeqIO.parse(handle=handle, format='fasta',
alphabet=IUPAC.ambiguous_dna)]
if not all(_verify_alphabet(x.seq) for x in seqs):
handle.seek(0)
seqs = [x.upper() for x in SeqIO.parse(handle=handle, format='fasta',
alphabet=HasStopCodon(IUPAC.extended_protein))]
if not all(_verify_alphabet(x.seq) for x in seqs):
raise ValueError('Invalid input file (neither DNA nor protein FASTA).')
return 'protein', seqs
return 'dna', seqs | 8e62e7d7002d74da7a43315785f5ce663b5ba366 | 23,429 |
# Assumed Megatron-LM helpers (the original `from typing import get_args` was spurious).
from megatron import get_args, print_rank_0
from megatron.data.gpt_dataset import build_train_valid_test_datasets
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for GPT3 ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup))
print_rank_0("> finished creating GPT2 datasets ...")
return train_ds, valid_ds, test_ds | 06f9532c6d60a3c3858dc08a43070b8aa4d19691 | 23,430 |
import requests
from pyquery import PyQuery
def get(username, start):
"""
Second level function to pull up to 50 reviews.
start - review number to start from
"""
r = requests.get(
'{}/user/beers/?start={}&&ba={}&order=dateD&view=R'.format(
BASE_URL, start, username
)
)
beers = []
pq = PyQuery(r.text)
pq = pq('#ba-content')
pq = pq('table')
pq = pq('tr')
for tr in pq[3:]: # first 3 rows are table headers
td = tr.getchildren()[1:] # first column is review star icon
beers.append(Beer.build_from_xml(td))
return beers | 7aaccda46954b629bad37e0a77f834e5b3f40c27 | 23,431 |
def isInContinent(country_name: str, continent: str):
"""Permet de vérifier si le pays est dans un continent
Paramètres
----------
country_name : str
Le nom du pays
continent : str
Le code du continent (alpha2)
Retours
-------
is_in_continent : int
entier binaire positif si le pays est dans le continent
Exemples
-------
>>> isInContinent('Gladstone', 'OC')
1
"""
try:
# two-letter country code
calpha2 = country_name_to_country_alpha2(country_name.strip())
except KeyError:
# Some country names in our datasets do not follow the standard available on
# wikipedia: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
calpha2 = cn_to_ca2[country_name.strip()]
# for example 'EU'
concode = country_alpha2_to_continent_code(calpha2)
return int(concode == continent) | 5a78e181ace8574baa00eeadd21e7ecea8529f6c | 23,432 |
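A minimal usage sketch for the function above. It assumes the two country helpers come from the `pycountry_convert` package and that `cn_to_ca2` is a small local fallback dict for non-standard country names (the entry shown is hypothetical).

```python
from pycountry_convert import (
    country_name_to_country_alpha2,
    country_alpha2_to_continent_code,
)

cn_to_ca2 = {}  # hypothetical local fallback for names pycountry_convert rejects

print(isInContinent("France", "EU"))  # -> 1
print(isInContinent("Japan", "EU"))   # -> 0
```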
def encoder_decoder_archi(inputs, is_train):
"""
Input is assumed to be a 4-D Tensor, with [batch_size, phrase_len, 1, features]
"""
encoder_layers = []
encoded = inputs
encoder_layers.append(encoded)
for i in range(config.encoder_layers):
encoded = encoder_conv_block(encoded, i, is_train)
encoder_layers.append(encoded)
encoder_layers.reverse()
decoded = encoder_layers[0]
for i in range(config.encoder_layers):
decoded = decoder_conv_block(decoded, encoder_layers[i+1], i, is_train)
return decoded | 6b75ce8a31375173e01ccd7d33078c76aff6d2b8 | 23,433 |
def build_dict_conforming_to_schema(schema, **kwargs):
"""
Given a schema object (for example, TIMESTAMP_SCHEMA from this module) and
a set of keyword arguments, create a dictionary that conforms to the given
schema, using the keyword arguments to define the elements of the new dict.
Checks the result to make sure that it conforms to the given schema, raising
an error if not.
Returns the new dict conforming to the schema if there are no problems.
"""
# Check that schema supports a check_match call.
# Duck typing version of this check:
if not hasattr(schema, 'check_match'):
raise ValueError(
'The given "schema" does not seem to be a schema. It has no '
'"check_match" method. Given schema: ' + repr(schema))
# # Strict typing version of this check:
# # Check that schema_name is a SCHEMA.Object.
# if not isinstance(schema, schema.Schema):
# raise ValueError(
# 'The first argument must be a schema.Schema object, but is not. '
# 'Given schema: ' + repr(schema))
# The return value.
d = {}
for key, value in kwargs.items():
d[key] = value
schema.check_match(d)
return d | 8971b7c6e1df8fd16a1b0e0946c9f21a3c601512 | 23,434 |
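A small usage sketch for the schema helper above. `DummySchema` is a hypothetical stand-in: the function only requires an object exposing a `check_match` method (the real code passes schema objects such as TIMESTAMP_SCHEMA).

```python
class DummySchema:
    """Hypothetical stand-in schema: accepts any dict containing a 'version' key."""
    def check_match(self, obj):
        if "version" not in obj:
            raise ValueError("missing required key: version")

d = build_dict_conforming_to_schema(DummySchema(), version=1, expires="2030-01-01")
print(d)  # {'version': 1, 'expires': '2030-01-01'}
```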
import numpy as np
import pandas as pd
def drop_non_channels(overlaps_df, filename):
""" Return the overlap dataframe with all non-channel genes dropped
and the index reset. Save the df as a csv with the filename
passed to this function. """
df = overlaps_df
channels_df_dict = {}
for column in df.columns:
# For each set of overlaps, drop all the gene names that are not
# channels. They are replaced by NaNs.
channels_bool = df.loc[:, column].isin(IUPHAR_Channels_names)
channels_df_dict[column] = df.loc[channels_bool, column]
channels_df = pd.DataFrame(channels_df_dict)
clean_channels_df = channels_df.reset_index(drop=True).copy()
for column in channels_df.columns:
# Set all of the rows in this column to NaN so they can be replaced
# by lists of channel names in each overlap.
clean_channels_df.loc[:, column] = np.NaN
channel_names = list(channels_df.loc[:, column].dropna())
# Put the list of channels in the overlap's row. Save the df
clean_channels_df.loc[0:len(channel_names)-1, column] = channel_names
clean_channels_df.to_csv(filename)
return clean_channels_df | 0cfa7f1ec86328179612c46c6b5f4b787984a7fa | 23,435 |
def _REOM(y,t,pot,l2):
"""
NAME:
_REOM
PURPOSE:
implements the EOM, i.e., the right-hand side of the differential
equation
INPUT:
y - current phase-space position
t - current time
pot - (list of) Potential instance(s)
l2 - angular momentum squared
OUTPUT:
dy/dt
HISTORY:
2010-07-20 - Written - Bovy (NYU)
"""
return [y[1],
l2/y[0]**3.+_evaluateplanarRforces(pot,y[0],t=t)] | 427393c1eeb89214603dc8363a9b39084e9030d4 | 23,437 |
def optimize_inst(module, inst):
"""Simplify one instruction"""
for operand in inst.operands:
if isinstance(operand, ir.Id):
if operand.inst.op_name not in ir.CONSTANT_INSTRUCTIONS:
return inst
if inst.op_name == 'OpCompositeConstruct':
inst = optimize_OpCompositeConstruct(module, inst)
elif inst.op_name == 'OpCompositeExtract':
inst = optimize_OpCompositeExtract(inst)
elif inst.op_name == 'OpIAdd':
inst = optimize_OpIAdd(module, inst)
elif inst.op_name == 'OpIMul':
inst = optimize_OpIMul(module, inst)
elif inst.op_name == 'OpLogicalAnd':
inst = optimize_OpLogicalAnd(module, inst)
elif inst.op_name == 'OpLogicalEqual':
inst = optimize_OpLogicalEqual(module, inst)
elif inst.op_name == 'OpLogicalNot':
inst = optimize_OpLogicalNot(module, inst)
elif inst.op_name == 'OpLogicalNotEqual':
inst = optimize_OpLogicalNotEqual(module, inst)
elif inst.op_name == 'OpLogicalOr':
inst = optimize_OpLogicalOr(module, inst)
elif inst.op_name == 'OpNot':
inst = optimize_OpNot(module, inst)
elif inst.op_name == 'OpSNegate':
inst = optimize_OpSNegate(module, inst)
elif inst.op_name == 'OpVectorShuffle':
inst = optimize_OpVectorShuffle(module, inst)
return inst | 1de61b914bdac4076be4ffb27823ad9384504814 | 23,438 |
import numpy as np
def table_3_3(M, lambd_nos, lambd_cil):
"""
Function that returns Cy for an ogival nose (head) section.
arguments: Mach number, fineness ratio (relative elongation) of the nose and of the cylindrical section
return: Cy value of the nose section
"""
cy1iz_alf_0 = [0.0350, 0.0350, 0.0350, 0.0350, 0.0362, 0.0375, 0.0380, 0.0378,
0.0374, 0.0364, 0.0350, 0.0337, 0.0325, 0.0315, 0.0305, 0.0300]
cy1iz_alf_05 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0445, 0.0472,
0.0480, 0.0475, 0.0460, 0.0435, 0.0420, 0.0385, 0.0375, 0.0365]
cy1iz_alf_1 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0455, 0.0495,
0.0515, 0.0520, 0.0515, 0.0485, 0.0465, 0.0445, 0.0425, 0.0410]
cy1iz_alf_2 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0380, 0.0430, 0.0455, 0.0515,
0.0540, 0.0555, 0.0552, 0.0535, 0.0515, 0.0485, 0.0470, 0.0455]
cy1iz_alf_4 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0455, 0.0515,
0.0549, 0.0565, 0.0565, 0.0505, 0.0545, 0.0524, 0.0502, 0.0480]
razm = [-0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2, 2.2]
if (M**2 - 1) >= 0:
razmm = np.sqrt(M**2 - 1) / lambd_nos
else:
razmm = -np.sqrt(1 - M**2) / lambd_nos
otnos = lambd_cil / lambd_nos
if otnos == 0:
cy1 = np.interp(razmm, razm, cy1iz_alf_0)
elif (otnos <= 0.5) and (otnos > 0):
cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_0), np.interp(razmm, razm, cy1iz_alf_05), otnos / 0.5)
elif (otnos <= 1) and (otnos > 0.5):
cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_05), np.interp(razmm, razm, cy1iz_alf_1), (otnos - 0.5) / 0.5)
elif (otnos <= 2) and (otnos > 1):
cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_1), np.interp(razmm, razm, cy1iz_alf_2), otnos - 1)
elif (otnos <= 4) and (otnos > 2):
cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_2), np.interp(razmm, razm, cy1iz_alf_4), otnos - 2)
else:
cy1 = np.interp(razmm, razm, cy1iz_alf_4)
return cy1 | d0d4b2e1fa65f3e8ad2cd39bee1d0d4878293090 | 23,439 |
def ms_to_timestamp(ms):
"""Convert ms to 'HH:MM:SS,mmm'"""
# XXX throw on overflow/underflow?
if ms < 0: ms = 0
if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME
h, m, s, ms = ms_to_times(ms)
return "%02d:%02d:%02d,%03d" % (h, m, s, ms) | 514773d94f4e3b78594bed4f232f34bcd2956f4d | 23,440 |
import torch
from torch.autograd import Variable
def _lovasz_softmax_flat(y_pred, y_true, classes="present"):
"""
Multi-class Lovasz-Softmax loss
y_pred: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
y_true: [P] Tensor, ground truth y_true (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in y_true, or a list of classes to average.
"""
if y_pred.numel() == 0:
# only void pixels, the gradients should be 0
return y_pred * 0.0
C = y_pred.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (y_true == c).float() # foreground for class c
if classes == "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = y_pred[:, 0]
else:
class_pred = y_pred[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(_lovasz_grad(fg_sorted))))
return mean(losses) | 9cdbab2873e198750079e560a559b1f4eb8f256c | 23,441 |
import cirq
def quantum_state_encoding_circuit(bits):
"""Build and return the quantum state encoding circuit for `bits` (a Hadamard gate on each qubit)."""
circuit = cirq.Circuit()
circuit.append(cirq.H.on_each(bits))
return circuit | 75734a349187af7ac32683d5faf6aec331f25713 | 23,442 |
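A quick usage sketch for the circuit builder above (requires `cirq`):

```python
import cirq

qubits = cirq.LineQubit.range(3)
circuit = quantum_state_encoding_circuit(qubits)
print(circuit)  # three-qubit circuit with an H gate on every qubit
```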
from datetime import datetime
def parse_mov_date(date_str):
"""converts string to date"""
try:
return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S%z")
except (TypeError, ValueError):
pass
return None | 6d4f1ad566f3e3914eeed7f9c29d914f1ced96df | 23,443 |
def get_settable_attr(attr):
"""
If attr is not settable, navigate up the connection hierarchy until we find the settable attribute.
For example, in RigSqueeze, the ikFk state attribute will be redirected to the root ctrl.
Note that in some cases the attribute might have been piped through a utility node; if necessary we'll try to
follow the connections through the utility node.
"""
def is_attr_interesting(attr):
if not attr:
return True
if not attr.isSettable() or not attr.isKeyable():
return False
classification = pymel.getClassification(attr.node().type())
if any(True for token in classification if 'utility' in token):
return False
return True
while not is_attr_interesting(attr):
attr = get_input_attr_from_output_attr(attr)
return attr | aca71e6e7f9e1312beaf1c4dcba897073ae3b3ea | 23,444 |
def adds(repo, subset, x):
"""Changesets that add a file matching pattern.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file or a
directory.
"""
# i18n: "adds" is a keyword
pat = getstring(x, _(b"adds requires a pattern"))
return checkstatus(repo, subset, pat, 'added') | 6d9d1879c77f64bb68d43483cc2d3095328fd26f | 23,445 |
def data_context_topology_context_topologyuuid_linklink_uuid_available_capacity_bandwidth_profile_committed_information_rate_get(uuid, link_uuid): # noqa: E501
"""data_context_topology_context_topologyuuid_linklink_uuid_available_capacity_bandwidth_profile_committed_information_rate_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of topology
:type uuid: str
:param link_uuid: Id of link
:type link_uuid: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!' | b44e48aa0fff6b01da22576fc73352deba812636 | 23,446 |
from typing import List
from typing import Dict
# SQLAlchemy's and_/bindparam (the original `from operator import and_` was spurious).
from sqlalchemy import and_, bindparam
def update_mlwh_with_cog_uk_ids(samples: List[Dict[str, str]]) -> None:
"""Update the MLWH to write the COG UK barcode for each sample.
Arguments:
samples {List[Dict[str, str]]} -- list of samples to be updated
"""
if len(samples) == 0:
return None
# assign db_connection to avoid UnboundLocalError in 'finally' block, in case of exception
db_connection = None
try:
data = []
for sample in samples:
# using 'b_' prefix for the keys because bindparam() doesn't allow you to use the real
# column names
data.append(
{
"b_root_sample_id": sample[FIELD_ROOT_SAMPLE_ID],
"b_rna_id": sample[FIELD_RNA_ID],
"b_result": sample[FIELD_RESULT],
"b_cog_uk_id": sample[FIELD_COG_BARCODE],
}
)
sql_engine = create_mysql_connection_engine(
app.config["WAREHOUSES_RW_CONN_STRING"], app.config["ML_WH_DB"]
)
table = get_table(sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
stmt = (
table.update()
.where(
and_(
table.c.root_sample_id == bindparam("b_root_sample_id"),
table.c.rna_id == bindparam("b_rna_id"),
table.c.result == bindparam("b_result"),
)
)
.values(cog_uk_id=bindparam("b_cog_uk_id"))
)
db_connection = sql_engine.connect()
results = db_connection.execute(stmt, data)
rows_matched = results.rowcount
if rows_matched != len(samples):
msg = f"""
Updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK ids was
only partially successful.
Only {rows_matched} of the {len(samples)} samples had matches in the MLWH
{app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table.
"""
logger.error(msg)
raise UnmatchedSampleError(msg)
except Exception as e:
msg = f"""
Error while updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK
ids.
{type(e).__name__}: {str(e)}
"""
logger.error(msg)
raise
finally:
if db_connection is not None:
db_connection.close() | b4d6dfaec4bb40a59cbdfef619f7f4542e55e2a9 | 23,450 |
def make_09f9():
"""倉庫インベントリーフッタ"""
return "" | 91d21aeb58fc004865db91846d73f978f48f9be4 | 23,451 |
def get_last_successful_hour_or_start_hour():
"""Get the last hour that ran successfully or the start hour."""
last_hour = crash_stats.get_last_successful_hour()
if last_hour:
return last_hour
return get_start_hour() | 86518100bafe3296d63a8ac3612de1fa2c2ed8d4 | 23,452 |
import copy
import datetime
import jwt
def encode_jwt(payload, secret):
"""
Return ``payload`` as a JWT encoded with ``secret``.
Return a JWT whose payload is ``payload`` and that is signed using
``secret``.
:arg payload: the payload to encode
:type payload: dict
:arg secret: the secret to sign the JWT with
:type secret: str
:return: the JWT string
:rtype: str
"""
payload = copy.deepcopy(payload)
payload["exp"] = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
jwt_bytes = jwt.encode(payload, secret, algorithm="HS256")
# PyJWT returns JWT's as UTF8-encoded byte strings (this isn't
# documented, but see
# https://github.com/jpadilla/pyjwt/blob/ed28e495f937f50165a252fd5696a82942cd83a7/jwt/api_jwt.py#L62).
# We need a unicode string, so decode it.
jwt_str = jwt_bytes.decode("utf-8")
return jwt_str | 497d5180e8956a737ad6edbd1113d73aeb915e80 | 23,453 |
import numpy as np
import torch.nn as nn
from torchvision import models
def make_model():
"""
Loads pretrained torchvision model and redefines fc layer for car classification
"""
# uses about 1 GiB of GPU memory
model = models.vgg19(pretrained = True)
#model = models.resnet50(pretrained = True)
in_feat_num = model.classifier[3].in_features
mid_feat_num = int(np.sqrt(in_feat_num))
out_feat_num = 2
# redefine the last two layers of the classifier for car classification
model.classifier[3] = nn.Linear(in_feat_num,mid_feat_num)
model.classifier[6] = nn.Linear(mid_feat_num, out_feat_num)
return model | cd189f4b4d4dcadf6dd686aad08e2e494e0c2200 | 23,454 |
def empty_call_false(*args, **kwargs) -> bool:
"""
Do nothing and return False
"""
return False | 3b3964c859a47698f0000e1b26963953980fad51 | 23,455 |
def cookie_is_encoded(data):
""" Tests whether or not a cookie is encoded / HMAC signed
-> #bool True if encoded
..
from vital.security import cookie_is_encoded
cookie_is_encoded(
"!YuOoKwDp8GhrwwojdjTxSCj1c2Z+7yz7r6cC7E3hBWo=?IkhlbGxvLCB3b3JsZC4i")
# -> True
..
"""
return data.startswith('!') and '?' in data | baf2a05b516a23cacca4985944974112019abfda | 23,456 |
import torch
def l2_normalize(x: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
"""Normalizes the input tensor using L2-norm.
Args:
x: Tensor to be normalized.
eps: Small value to avoid division by zero.
Returns:
Normalized tensor.
"""
return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x) | 22273bbbda7bece511d31d517790bfa14427d76f | 23,457 |
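A quick sanity check for the normalization above (each row of the output should have approximately unit L2 norm):

```python
import torch

x = torch.randn(4, 8)
y = l2_normalize(x)
print(torch.norm(y, p=2, dim=1))  # ~tensor([1., 1., 1., 1.])
```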
# NOTE: `S` below is ssqueezepy's internal array-backend helper (providing
# `S.is_tensor` / `S.is_dtype`), not `re.S`; the original `from re import S` was spurious.
def ssq_cwt(x, wavelet='gmw', scales='log-piecewise', nv=None, fs=None, t=None,
ssq_freqs=None, padtype='reflect', squeezing='sum', maprange='peak',
difftype='trig', difforder=None, gamma=None, vectorized=True,
preserve_transform=None, astensor=True, order=0, patience=0,
flipud=True, cache_wavelet=None, get_w=False, get_dWx=False):
"""Synchrosqueezed Continuous Wavelet Transform.
Implements the algorithm described in Sec. III of [1].
Uses `wavelet.dtype` precision.
# Arguments:
x: np.ndarray
Input vector(s), 1D or 2D. See `help(cwt)`.
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain. See `help(cwt)`.
scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
CWT scales. See `help(cwt)`.
nv: int / None
Number of voices (wavelets per octave). Suggested >= 16.
fs, t
See `help(_cwt.cwt)`.
ssq_freqs: str['log', 'linear'] / np.ndarray / None
Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
mapping is only approximate and wavelet-dependent.
If None, will infer from and set to same distribution as `scales`.
padtype: str / None
Pad scheme to apply on input. See `help(utils.padsignal)`.
`None` -> no padding.
squeezing: str['sum', 'lebesgue'] / function
See `help(ssqueezing.ssqueeze)`.
maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
Kind of frequency mapping used, determining the range of frequencies
spanned (fm to fM, min to max).
- 'maximal': fm=1/dT, fM=1/(2*dt), always. Data's fundamental
and Nyquist frequencies, determined from `fs` (or `t`).
Other mappings can never span outside this range.
- ('peak', 'energy'): sets fm and fM based on center frequency
associated with `wavelet` at maximum and minimum scale,
respectively. See `help(wavelets.center_frequency)`.
- 'peak': the frequency-domain trimmed bell will have its peak
at Nyquist, meaning all other frequencies are beneath, so each
scale is still correctly resolved but with downscaled energies.
With sufficiently-spanned `scales`, coincides with 'maximal'.
- 'energy': however, the bell's spectral energy is centered
elsewhere, as right-half of bell is partly or entirely trimmed
(left-half can be trimmed too). Use for energy-centric mapping,
which for sufficiently-spanned `scales` will always have lesser
fM (but ~same fM).
- tuple: sets `ssq_freqrange` directly.
difftype: str['trig', 'phase', 'numeric']
Method by which to differentiate Wx (default='trig') to obtain
instantaneous frequencies:
w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
- 'trig': use `dWx`, obtained via trigonometric (frequency-domain
interpolant) differentiation (see `cwt`, `phase_cwt`).
- 'phase': differentiate by taking forward finite-difference of
unwrapped angle of `Wx` (see `phase_cwt`).
- 'numeric': first-, second-, or fourth-order (set by `difforder`)
numeric differentiation (see `phase_cwt_num`).
difforder: int[1, 2, 4]
Order of differentiation for difftype='numeric' (default=4).
gamma: float / None
CWT phase threshold. Sets `w=inf` for small values of `Wx` where
phase computation is unstable and inaccurate (like in DFT):
w[abs(Wx) < beta] = inf
This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
contributions from points with indeterminate phase.
Default = sqrt(machine epsilon) = np.sqrt(np.finfo(np.float64).eps)
vectorized: bool (default True)
Whether to vectorize CWT, i.e. compute quantities for all scales at
once, which is faster but uses more memory.
preserve_transform: bool (default None) / None
Whether to return `Wx` as directly output from `cwt` (it might be
altered by `ssqueeze` or `phase_transform`). Uses more memory
per storing extra copy of `Wx`.
- Defaults to True if `'SSQ_GPU' == '0'`, else False.
astensor: bool (default True)
If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
or move them back to CPU & convert to Numpy arrays.
order: int (default 0) / tuple[int]
`order > 0` computes ssq of `cwt` taken with higher-order GMWs.
If tuple, computes ssq of average of `cwt`s taken at each specified
order. See `help(_cwt.cwt_higher_order)`.
patience: int / tuple[int, int]
pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.
flipud: bool (default True)
See `help(ssqueeze)`.
cache_wavelet: bool (default None) / None
See `help(cwt)`.
get_w, get_dWx: bool (default False)
`get_w`:
True: will compute phase transform separately, assign it to
array `w` and return it.
False: will compute synchrosqueezing directly from `Wx` and
`dWx` without assigning to intermediate array, which is faster
(by 20-30%) and takes less memory.
`get_dWx`:
True: will return dWx
False: discards dWx after computing `w` or synchrosqueezing.
`get_dWx=True` with `get_w=True` uses most memory.
These options do not affect `Tx`.
# Returns:
Tx: np.ndarray [nf x n]
Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
(nf = len(ssq_freqs); n = len(x))
`nf = na` by default, where `na = len(scales)`.
Wx: np.ndarray [na x n]
Continuous Wavelet Transform of `x`, L1-normed (see `cwt`).
ssq_freqs: np.ndarray [nf]
Frequencies associated with rows of `Tx`.
scales: np.ndarray [na]
Scales associated with rows of `Wx`.
w: np.ndarray [na x n] (if `get_w=True`)
Phase transform for each element of `Wx`.
dWx: [na x n] np.ndarray (if `get_dWx=True`)
See `help(_cwt.cwt)`.
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
def _process_args(x, scales, fs, t, nv, difftype, difforder, squeezing,
maprange, wavelet, get_w):
if x.ndim == 2 and get_w:
raise NotImplementedError("`get_w=True` unsupported with batched "
"input.")
difforder = _check_ssqueezing_args(squeezing, maprange, wavelet,
difftype, difforder, get_w,
transform='cwt')
if nv is None and not isinstance(scales, np.ndarray):
nv = 32
N = x.shape[-1]
dt, fs, t = _process_fs_and_t(fs, t, N)
return N, dt, fs, difforder, nv
def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
if difftype == 'trig':
# calculate instantaneous frequency directly from the
# frequency-domain derivative
w = phase_cwt(Wx, dWx, difftype, gamma)
elif difftype == 'phase':
# !!! bad; yields negatives, and forcing abs(w) doesn't help
# calculate inst. freq. from unwrapped phase of CWT
w = phase_cwt(Wx, None, difftype, gamma)
elif difftype == 'numeric':
# !!! tested to be very inaccurate for small scales
# calculate derivative numericly
_, n1, _ = p2up(N)
Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
w = phase_cwt_num(Wx, dt, difforder, gamma)
return Wx, w
N, dt, fs, difforder, nv = _process_args(x, scales, fs, t, nv, difftype,
difforder, squeezing, maprange,
wavelet, get_w)
wavelet = Wavelet._init_if_not_isinstance(wavelet, N=N)
# CWT with higher-order GMWs
if isinstance(order, (tuple, list, range)) or order > 0:
# keep padding for `trigdiff`
kw = dict(wavelet=wavelet, scales=scales, fs=fs, t=t, nv=nv,
l1_norm=True, derivative=False, padtype=padtype, rpadded=True,
vectorized=vectorized, cache_wavelet=cache_wavelet)
_, n1, _ = p2up(N)
average = isinstance(order, (tuple, list, range))
Wx, scales = cwt(x, order=order, average=average, **kw)
dWx = trigdiff(Wx, fs, rpadded=True, N=N, n1=n1)
Wx = Wx[:, n1:n1 + N]
if S.is_tensor(Wx):
Wx = Wx.contiguous()
scales, cwt_scaletype, *_ = process_scales(scales, N, wavelet, nv=nv,
get_params=True)
# regular CWT
if order == 0:
# l1_norm=True to spare a multiplication; for SSQ_CWT L1 & L2 are exactly
# same anyway since we're inverting CWT over time-frequency plane
rpadded = (difftype == 'numeric')
Wx, scales, dWx = cwt(x, wavelet, scales=scales, fs=fs, nv=nv,
l1_norm=True, derivative=True, padtype=padtype,
rpadded=rpadded, vectorized=vectorized,
patience=patience, cache_wavelet=cache_wavelet)
# make copy of `Wx` if specified
if preserve_transform is None:
preserve_transform = not S.is_tensor(Wx)
if preserve_transform:
_Wx = (Wx.copy() if not S.is_tensor(Wx) else
Wx.detach().clone())
else:
_Wx = Wx
# gamma
if gamma is None:
gamma = np.sqrt(EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)
# compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
if get_w:
_Wx, w = _phase_transform(_Wx, dWx, N, dt, gamma, difftype, difforder)
_dWx = None # don't use in `ssqueeze`
if not get_dWx:
dWx = None
else:
w = None
_dWx = dWx
# default to same scheme used by `scales`
if ssq_freqs is None:
ssq_freqs = cwt_scaletype
# affects `maprange` computation if non-tuple
was_padded = bool(padtype is not None)
# synchrosqueeze
Tx, ssq_freqs = ssqueeze(_Wx, w, ssq_freqs, scales, fs=fs, t=t,
squeezing=squeezing, maprange=maprange,
wavelet=wavelet, gamma=gamma, was_padded=was_padded,
flipud=flipud, dWx=_dWx, transform='cwt')
if difftype == 'numeric':
Wx = Wx[:, 4:-4]
Tx = Tx[:, 4:-4]
w = w[:, 4:-4] if w is not None else None
if not astensor and S.is_tensor(Tx):
Tx, Wx, w, dWx = [g.cpu().numpy() if S.is_tensor(g) else g
for g in (Tx, Wx, w, dWx)]
if get_w and get_dWx:
return Tx, Wx, ssq_freqs, scales, w, dWx
elif get_w:
return Tx, Wx, ssq_freqs, scales, w
elif get_dWx:
return Tx, Wx, ssq_freqs, scales, dWx
else:
return Tx, Wx, ssq_freqs, scales | 2776e85dde171b1c47fdce028bf9c845298b3a93 | 23,458 |
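A minimal usage sketch, assuming this is ssqueezepy's public `ssq_cwt` (with the default `get_w`/`get_dWx` it returns `Tx, Wx, ssq_freqs, scales`):

```python
import numpy as np
from ssqueezepy import ssq_cwt

t = np.linspace(0, 1, 2048, endpoint=False)
x = np.cos(2 * np.pi * (8 + 24 * t) * t)  # simple chirp test signal
Tx, Wx, ssq_freqs, scales = ssq_cwt(x)
print(Tx.shape, Wx.shape)
```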
import torch
import torch.nn.functional as F
from torch import nn
def predict_image_classification(model: nn.Module, input_: torch.Tensor):
"""
Predict using an image classification model.
Args:
model (`nn.Module`):
Pytorch model.
input_ (`Tensor`):
Input image tensor.
Returns:
(`tuple`)
Prediction score which max is 1, and label idx.
"""
output = model(input_)
output = F.softmax(output, dim=1)
prediction_score, pred_label_idx = torch.topk(output, 1)
if isinstance(pred_label_idx, torch.Tensor):
pred_label_idx = pred_label_idx.squeeze().item()
prediction_score = prediction_score.squeeze().detach().item()
return prediction_score, pred_label_idx | 2343d4db9b93910337e0e55b9935783714710330 | 23,459 |
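A usage sketch with an untrained torchvision classifier; the weights are random here, so the predicted label is meaningless and the call only demonstrates shapes and flow:

```python
import torch
import torchvision

model = torchvision.models.resnet18()  # no pretrained weights by default
model.eval()
dummy_input = torch.randn(1, 3, 224, 224)
score, label_idx = predict_image_classification(model, dummy_input)
print(score, label_idx)
```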
def _id_to_box(id_, dim):
"""Convert id to box ID"""
row = id_ // (dim ** 3)
col = (id_ % (dim ** 2)) // dim
return row * dim + col | 8e6c4779872fff5cdc5a6ca6b4143a1519d8aaf2 | 23,460 |
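A worked example, assuming a dim*dim x dim*dim grid (e.g. dim=3 for a 9x9 Sudoku board) with cells numbered row-major from 0:

```python
print(_id_to_box(0, 3))   # -> 0 (top-left box)
print(_id_to_box(40, 3))  # -> 4 (centre box: row 40 // 27 = 1, col (40 % 9) // 3 = 1)
print(_id_to_box(80, 3))  # -> 8 (bottom-right box)
```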
import string
def _load_hex(instream):
"""Load font from a .hex file."""
global_comment = []
glyphs = []
comment = []
for line in instream:
line = line.rstrip('\r\n')
if ':' in line:
# parse code line
key, value = line.rsplit(':', 1)
value = value.strip()
if (
# preserve empty lines if they separate comments
(not line and comment and comment[-1] != '')
# marked as comment
or (line and line[0] == '#')
# pass through lines without : as comments - allows e.g. to convert diffs, like hexdraw
or (':' not in line)
# not a valid line, treat as comment
or set(value) - set(string.hexdigits + ',')
):
comment.append(line)
else:
# when first glyph is found, split comment lines between global and glyph
if not glyphs and comment:
global_comment, comment = split_global_comment(comment)
glyphs.append(_convert_glyph(key, value, comment))
comment = []
# preserve any comment at end of file as part of global comment
global_comment = '\n'.join([*_clean_comment(global_comment), *_clean_comment(comment)])
return Font(glyphs, comments=global_comment, properties=dict(encoding='unicode')) | 6e5980e53ee598d813f10bdbcb775e8d47102fa8 | 23,461 |
def make_small_graph(graph_description, create_using=None):
"""
Return the small graph described by graph_description.
graph_description is a list of the form [ltype,name,n,xlist]
Here ltype is one of "adjacencylist" or "edgelist",
name is the name of the graph and n the number of nodes.
This constructs a graph of n nodes with integer labels 0,..,n-1.
If ltype="adjacencylist" then xlist is an adjacency list
with exactly n entries, in which the j'th entry (which can be empty)
specifies the nodes connected to vertex j.
e.g. the "square" graph C_4 can be obtained by
>>> G = nx.make_small_graph(
... ["adjacencylist", "C_4", 4, [[2, 4], [1, 3], [2, 4], [1, 3]]]
... )
or, since we do not need to add edges twice,
>>> G = nx.make_small_graph(["adjacencylist", "C_4", 4, [[2, 4], [3], [4], []]])
If ltype="edgelist" then xlist is an edge list
written as [[v1,w2],[v2,w2],...,[vk,wk]],
where vj and wj integers in the range 1,..,n
e.g. the "square" graph C_4 can be obtained by
>>> G = nx.make_small_graph(
... ["edgelist", "C_4", 4, [[1, 2], [3, 4], [2, 3], [4, 1]]]
... )
Use the create_using argument to choose the graph class/type.
"""
if graph_description[0] not in ("adjacencylist", "edgelist"):
raise NetworkXError("ltype must be either adjacencylist or edgelist")
ltype = graph_description[0]
name = graph_description[1]
n = graph_description[2]
G = empty_graph(n, create_using)
nodes = G.nodes()
if ltype == "adjacencylist":
adjlist = graph_description[3]
if len(adjlist) != n:
raise NetworkXError("invalid graph_description")
G.add_edges_from([(u - 1, v) for v in nodes for u in adjlist[v]])
elif ltype == "edgelist":
edgelist = graph_description[3]
for e in edgelist:
v1 = e[0] - 1
v2 = e[1] - 1
if v1 < 0 or v1 > n - 1 or v2 < 0 or v2 > n - 1:
raise NetworkXError("invalid graph_description")
else:
G.add_edge(v1, v2)
G.name = name
return G | deb1cf0d08bba91a538c7d2c47c1d89e2c2a28da | 23,462 |
def get_masksize(mask, labelnum = None):
"""
Compute mask size in surface space
Parameters:
----------
mask: label image (mask)
labelnum: mask's label number, used for group analysis
Return:
--------
masksize: mask size of each roi
Example:
--------
>>> masksize = get_masksize(mask)
"""
if mask.ndim == 3:
mask = mask[:,0,0]
labels = np.unique(mask)[1:]
masksize = []
if len(labels) != 0:
if labelnum is None:
labelnum = int(np.max(labels))
for i in range(labelnum):
masksize.append(len(mask[mask == i+1]))
else:
masksize.append(0)
return np.array(masksize) | c8ccd82d9887f923e3d2581f97dd2a8f016cc182 | 23,463 |
def _context_py2rpmversion(context):
"""get a python PEP0440 compatible version and translate it to an RPM
version"""
# the context needs a variable set via {% set upstream_version = 'ver' %}
_context_check_variable(context, CONTEXT_VAR_UPSTREAM_VERSION,
'py2rpmversion')
version = context.vars[CONTEXT_VAR_UPSTREAM_VERSION]
v_python = parse(version)
# fedora does not allow '~' in versions but uses a combination of Version
# and Release
# https://fedoraproject.org/wiki/Packaging:Versioning#Pre-Release_packages
if context['spec_style'] == 'fedora':
if len(v_python._version.release) >= 4:
return "%d.%d.%d" % (v_python._version.release[0:3])
else:
return v_python.base_version
else:
v_rpm = v_python.public
if v_python.is_prerelease:
# we need to add the 'x' in front of alpha/beta releases because
# in the python world, "1.1a10" > "1.1.dev10"
# but in the rpm world, "1.1~a10" < "1.1~dev10"
v_rpm = v_rpm.replace('a', '~xalpha')
v_rpm = v_rpm.replace('b', '~xbeta')
v_rpm = v_rpm.replace('rc', '~xrc')
v_rpm = v_rpm.replace('.dev', '~dev')
return v_rpm | 3f9110dff377a6c819e6b87ab5fd9c81a7532694 | 23,464 |
def check_and_format_address(address):
"""
check address
"""
try:
formatted_address = to_checksum_address(address)
return formatted_address
except Exception as e:
raise ArgumentsError("invalid address {}, reason: {}"
.format(address, e)) | 1b0c88aede34386d1ccd5facd1bdbd4724538ab7 | 23,465 |
from typing import Optional
def get_cache_name(cache_type: str, tag: Optional[str] = None) -> str:
"""
Get the canonical cache name (e.g., "tmp.cache.mem.tag") for a type of
cache.
:param cache_type: type of a cache
:param tag: optional unique tag of the cache, empty by default
:return: name of the folder for a cache
"""
_check_valid_cache_type(cache_type)
cache_name = "tmp.cache"
cache_name += f".{cache_type}"
if tag is not None:
cache_name += f".{tag}"
return cache_name | ff933829314dd1794406ca4282eaf4efdf860b39 | 23,466 |
from configobj import ConfigObj
def _aves2_cfg():
""" Read aipctl config
"""
config = ConfigObj()
# The result is a merge of all the files as they appear in the list
f_list = cfg_files()
if not f_list:
print("error: configuration file not found")
exit(1)
for f in f_list:
_cfg = ConfigObj(f, encoding='UTF8')
config.merge(_cfg)
return config | 527f1e94d5ec2c5cd13aa1a886d4c56914828f4d | 23,467 |
def estimate_responsivity(mis_MU, norm_MU):
"""from the estimated base intensities, we return onlu users which have zero base intensity for misinformation
and greater than zero base intensity for normal content. """
no_bad_intentions_ids = []
for id in range(len(mis_MU)):
if mis_MU[id] == 0 and norm_MU[id] != 0:
no_bad_intentions_ids.append(id)
return no_bad_intentions_ids | 4d944478694f1be1474eea963fad284079d5fe57 | 23,469 |
from typing import Union
from typing import Any
import datetime
import pandas as pd
def parse_field_constraint(
x: Union[str, int, float, bool, list],
constraint: str,
type: str = "string",
**field: Any,
) -> Union[str, int, float, bool, list, datetime.datetime, ConstraintTypeError]:
"""
Parse field constraint.
Arguments:
x: Constraint value.
constraint: Constraint type.
type: Field type.
field: Additional field attributes
(https://specs.frictionlessdata.io/table-schema/#field-descriptors).
Returns:
Parsed field constraint.
"""
is_list = isinstance(x, list)
X = pd.Series(x)
is_str = X.apply(lambda xi: isinstance(xi, str))
if not is_str.any():
return x
result = parse_field(X[is_str], type=type, **field)
if isinstance(result, ValueTypeError):
return ConstraintTypeError(
fieldName=field.get("name", ""),
constraintName=constraint,
constraintValue=X[is_str].unique().tolist() if is_list else x,
fieldType=type,
fieldFormat=result["fieldFormat"],
)
X[is_str] = result
return X.tolist() if is_list else X[0] | 531e33a1bc79e8a232032ebe9d340f829a3f513c | 23,470 |
def compute_ab_cycles(c_cycles, linear_combinations, g, tretkoff_graph):
"""
Returns the a- and b-cycles of the Riemann surface given the
intermediate 'c-cycles' and linear combinations matrix.
Input:
- c_cycles
- linear_combinations: output of the Frobenius transform of the
"""
lincomb = linear_combinations
M,N = lincomb.shape
a_cycles = []
b_cycles = []
for i in range(g):
a = []
b = []
for j in range(N):
cij = lincomb[i,j]
c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])
a.extend(abs(cij)*c[:-1])
cij = lincomb[i+g,j]
c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])
b.extend(abs(cij)*c[:-1])
a = a + [0]
b = b + [0]
a = compress_cycle(a, tretkoff_graph)
b = compress_cycle(b, tretkoff_graph)
a_cycles.append(a)
b_cycles.append(b)
return a_cycles, b_cycles | 645d569ee06cb87161b12158603b1b6dcfb92077 | 23,471 |
import pickle
import pathlib
import numpy as np
import pmlb
def pmlb_multiclass_classification_dataset_names():
"""Returns list of multiclass classification datasets in PMLB."""
try:
name = pickle.load(open(".pmlb/mcdn.pkl", "rb"))
except FileNotFoundError:
pathlib.Path(".pmlb").mkdir(parents=True, exist_ok=True)
name = []
for dataset in pmlb.classification_dataset_names:
X, y = pmlb.fetch_data(dataset, return_X_y=True, local_cache_dir=".pmlb")
if np.unique(y).size != 2:
name.append(dataset)
pickle.dump(name, open(".pmlb/mcdn.pkl", "wb"))
return name | d3030441c119de0c96c9d83df026b7f922fe21e6 | 23,472 |
from losses.loss_functions import BalancedCrossEntropyLoss
from losses.loss_functions import SoftMaxwithLoss
from losses.loss_functions import NormalsLoss
from losses.loss_functions import DepthLoss
def get_loss(p, task=None):
""" Return loss function for a specific task """
if task == 'edge':
criterion = BalancedCrossEntropyLoss(size_average=True, pos_weight=p['edge_w'])
elif task == 'semseg' or task == 'human_parts':
criterion = SoftMaxwithLoss()
elif task == 'normals':
criterion = NormalsLoss(normalize=True, size_average=True, norm=p['normloss'])
elif task == 'sal':
criterion = BalancedCrossEntropyLoss(size_average=True)
elif task == 'depth':
criterion = DepthLoss(p['depthloss'])
else:
raise NotImplementedError('Undefined Loss: Choose a task among '
'edge, semseg, human_parts, sal, depth, or normals')
return criterion | 6284d2e40fc8aa220c153307fc7199a47549d15d | 23,473 |
def compute_embeddings(image):
"""A mock function for a call to a deep learning model or a web service."""
del image # this is just a mock and doesn't do anything with the input
return 42 | 31536d4a2371140e962aadb63b8645685328b3df | 23,474 |
def text_to_string(filename):
"""Read a text file and return a string."""
with open(filename) as infile:
return infile.read() | dbd79e78c84c3374c0252544086885b909ae9bd9 | 23,476 |
def lgsvlToScenicElevation(pos):
"""Convert LGSVL positions to Scenic elevations."""
return pos.y | d90f7509285b08c791eac56c1a119f91120cf556 | 23,477 |
import jinja2
def render_to_string(backend, filename, context):
# type: (str, str, Dict) -> str
"""
Render a template using the specified context
:param backend: The backend for which the template is rendered
:param filename: The template name
:param context: The data to use when rendering the template
:return: The rendered template as a string
"""
template_directory = "./swagger_django_generator/templates/{}".format(backend)
loaders = [jinja2.FileSystemLoader(template_directory)]
try:
loaders.append(jinja2.PackageLoader("swagger_django_generator", "templates/{}".format(backend)))
except ImportError:
pass
environment = jinja2.Environment(
loader=jinja2.ChoiceLoader(loaders),
trim_blocks=True,
lstrip_blocks=True,
)
environment.filters["clean_schema"] = clean_schema
environment.filters["parse_array"] = parse_array
environment.filters["capitalize_splitter"] = capitalize_splitter
return environment.get_template(filename).render(context) | c645a9867acdb50236a5604144a104cb38e841f9 | 23,478 |
def customfield_by_name(self, name):
"""
Get the value of a customfield by name
"""
# Get all fields from Jira. This is expensive, so only do it once
if not hasattr(self, '_fields'):
response = self._session.get(
self._base_url.format(
server=self._options['server'],
rest_path=self._options['rest_path'],
rest_api_version=self._options['rest_api_version'],
path='field',
),
auth=self._session.auth,
)
if response.status_code != 200:
raise JIRAError(response.text)
else:
self._fields = response.json()
for field in self._fields:
if field.get('name') == name:
break
else:
raise JIRAError('Could not find customfield')
return getattr(self.fields, field.get('id')) | 35f7ee1e88029201086fc75bbc280beb386cca44 | 23,479 |
from pathlib import Path
def download_images(imgs):
"""Save any images on page to local directory"""
had_download_issue = False
for img in imgs:
image_url = 'https://projecteuler.net/{}'.format(img.get('src'))
logger.info(f'downloading image {image_url}')
image_name = Path(image_url).name
image = get_the_response(image_url)
if image:
(LOCAL_IMAGES_DIR / image_name).write_bytes(image.content)
else:
had_download_issue = True
return not had_download_issue | 7d39dff40797a698215a589f9ff65f3df4a85e9f | 23,480 |
def admin_order_pdf(request, order_id):
"""
1. Get data (and templates for displaying data)
2. Set type (cuz you'll need to download it, right?)
3. Using the module (configuring stuff, e.g. the CSS :P)
"""
order = get_object_or_404(Order, id=order_id)
html = render_to_string('orders/order/pdf.html',
{ 'order': order })
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = (
'filename=order_{}.pdf'.format(order.id)
)
weasyprint.HTML(string=html).write_pdf(
response,
stylesheets=[
weasyprint.CSS(
settings.STATIC_ROOT + 'css/pdf.css'
)
]
)
return response | c4cf5a38743f573ef8dfa704cfe2d12bb47a679c | 23,481 |
import traceback
def delete_container(request, container):
""" Deletes a container """
storage_url = request.session.get('storage_url', '')
#meta_storage_url = request.session.get('meta_storage_url', '')
auth_token = request.session.get('auth_token', '')
#meta_auth_token = request.session.get('meta_auth_token', '')
username = request.session.get('username', '')
project_id = request.session.get('project_id','')
try:
conn = EncSwiftclientAPI(auth_token, project_id)
conn.delete_container(container)
messages.add_message(request, messages.INFO, _("Container deleted."))
except client.ClientException:
traceback.print_exc()
messages.add_message(request, messages.ERROR, _("Access denied. If there are files in %s, delete them first!" % container))
return redirect(containerview) | ce205d6112239905707064f0357b6c19fe3bd688 | 23,482 |
def dense_encoder(X, params):
"""Dense model encoder subgraph that produces latent matrix.
Given data matrix tensor X and dictionary of parameters, process through dense
model encoder subgraph and return encoder latent vector for each example in
batch.
Args:
X: tf.float64 matrix tensor of input data.
params: Dictionary of parameters.
Returns:
tf.float64 matrix tensor encoder latent vector for each example in batch.
"""
# Create the input layer to our DNN
network = X
# Add hidden layers with the given number of units/neurons per layer
for units in params["enc_dnn_hidden_units"]:
network = tf.layers.dense(
inputs=network,
units=units,
activation=tf.nn.relu)
return tf.layers.dense(
inputs=network,
units=params["latent_vector_size"],
activation=tf.nn.relu) | 1dfe2b876cb32b5d8b89e70e451a732730762a14 | 23,483 |
def __asset_inventory_espanol(asset):
""" Renombra los encabezados del inventario de bases de datos de Datos \
Abiertos Colombia a términos en español.
:param asset: (pandas.DataFrame) - Tabla de inventario del portal de datos\
abiertos Colombia (https://www.datos.gov.co).
:return: base de datos en formato dataframe.
"""
lista_columnas = list(DIC_RENAME.keys())
asset = asset[lista_columnas].rename(columns=DIC_RENAME)
# Reformat the dates
asset["fecha_creacion"] = asset["fecha_creacion"].apply(lambda x: x[0:10])
asset["fecha_actualizacion"] = asset["fecha_actualizacion"].apply(
lambda x: x[0:10])
# Cast rows and columns to float
asset["filas"] = asset["filas"].astype(float)
asset["columnas"] = asset["columnas"].astype(float)
# Translate the 'base_publica' categories
asset["base_publica"] = asset["base_publica"].map(
{"published": "Si", "unpublished": "No"})
# Translate the 'tipo' categories
asset["tipo"] = asset["tipo"].map({
"dataset": "conjunto de datos",
"federatet_href": "enlace externo",
"href": "enlace externo",
"map": "mapa",
"chart": "grafico",
"filter": "vista filtrada",
"file": "archivo o documento",
"visualization": "visualizacion",
"story": "historia",
"datalens": "lente de datos",
"form": "formulario",
"calendar": "calendario",
"invalid_datatype": "tipo_invalido"})
return asset | dfb508cec458ecb63c371849d84cb3b3d79335ba | 23,484 |
def end_of_sign_found(token: str, preceding_token: str):
"""
This function receives a token and its preceding token and returns whether that token ends an Akkadian sign.
"""
if not preceding_token:
return False
if '-' in token or '.' in token:
return True
if not preceding_token.endswith('-') and not token.startswith('##'):
return True
return False | 30024ddad31c3149d1d2363842b085d2923c1387 | 23,485 |
from typing import Optional
from typing import Dict
# NOTE: `datasets` below is assumed to be a module-level dict mapping dataset names to
# their dataset class and transforms (the original `import datasets` was spurious).
def get_loaders(
dataset: str, batch_size: int, num_workers: Optional[int]
) -> Dict[str, DataLoader]:
"""Init loaders based on parsed parametrs.
Args:
dataset: dataset for the experiment
batch_size: batch size for loaders
num_workers: number of workers to process loaders
Returns:
{"train":..., "valid":...}
"""
transforms = datasets[dataset]["train_transform"]
transform_original = datasets[dataset]["valid_transform"]
train_data = SelfSupervisedDatasetWrapper(
datasets[dataset]["dataset"](root="data", train=True, transform=None, download=True),
transforms=transforms,
transform_original=transform_original,
)
valid_data = SelfSupervisedDatasetWrapper(
datasets[dataset]["dataset"](root="data", train=False, transform=None, download=True),
transforms=transforms,
transform_original=transform_original,
)
train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
valid_loader = DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
return {"train": train_loader, "valid": valid_loader} | 2340d05f69057bcb034a8ec4ad5515055d0bde71 | 23,488 |
def pipeline():
""" Creates a pipeline configured to use a given model with a specified configuration.
Notes
-----
Pipeline can be executed only if its config contains the following parameters:
model_class : TFModel
Architecture of model. List of available models is defined at 'AVAILABLE_MODELS'.
model_config : Config
Model parameters.
Returns
-------
Pipeline
A pipeline that contains model initialization and training with a given config.
"""
test_pipeline = (Pipeline()
.init_variable('current_loss')
.init_model('dynamic', C('model_class'),
'model', C('model_config'))
.to_array()
.train_model('model',
fetches='loss',
images=B('images'),
labels=B('labels'),
save_to=V('current_loss'))
)
return test_pipeline | f8fbbe3898b58b1b1621d742e4acdf80f17ba11c | 23,489 |
import copy
def get_screen_point_array(width: float, height: float):
"""Get screen points(corners) in pixels from normalized points_in_square
:param width: screen width
:param height: screen height
:return: the screen-corner points converted to a triangle-vertex array (np.float32)
"""
points = copy.deepcopy(points_in_square)
for i in range(len(points_in_square)):
points[i] = points[i][0] * width, points[i][1] * height
result = list_points_to_triangle(points)
return np.array(result, dtype=np.float32) | 34d88ddb1a24e4e3ebc81f0c7e99530548ed8a8b | 23,490 |
def get_spacing_matrix(size, spacing, offset):
"""Returns a sparse matrix LinOp that spaces out an expression.
Parameters
----------
size : tuple
(rows in matrix, columns in matrix)
spacing : int
The number of rows between each non-zero.
offset : int
The number of zero rows at the beginning of the matrix.
Returns
-------
LinOp
A sparse matrix constant LinOp.
"""
val_arr = []
row_arr = []
col_arr = []
# Selects from each column.
for var_row in range(size[1]):
val_arr.append(1.0)
row_arr.append(spacing*var_row + offset)
col_arr.append(var_row)
mat = sp.coo_matrix((val_arr, (row_arr, col_arr)), size).tocsc()
return lu.create_const(mat, size, sparse=True) | 5871385bcdcb9ce538fe1e4525c947c2cfa582c9 | 23,491 |
def next_power2(x):
"""
:param x: an integer number
:return: the power of 2 which is the larger than x but the smallest possible
>>> result = next_power2(5)
>>> np.testing.assert_equal(result, 8)
"""
return 2 ** np.ceil(np.log2(x)).astype(int) | 379c2170d0dbd25ee01a47eb0765f4dfd143efbb | 23,492 |
def category_induced_page():
"""Form to compute the Category induced."""
return render_template('category-induced.html') | 176af8bbbb67afce78c11483f66b3d5ac15f6d76 | 23,493 |
# NOTE: `array`, `bits`, `uint`, `sint`, `concat` and the Array/Bits/Digital/SInt/UInt
# types below are assumed to come from the magma hardware DSL; the original
# `import array` / `from operator import concat` imports were spurious.
def zext(value, n):
"""Extend `value` by `n` zeros"""
assert (isinstance(value, (UInt, SInt, Bits)) or
(isinstance(value, Array) and issubclass(value.T, Digital)))
if not is_int(n) or n < 0:
raise TypeError(f"Expected non-negative integer, got '{n}'")
if n == 0:
return value
if isinstance(value, UInt):
zeros = uint(0, n)
elif isinstance(value, SInt):
zeros = sint(0, n)
elif isinstance(value, Bits):
zeros = bits(0, n)
elif isinstance(value, Array):
zeros = array(0, n)
result = concat(value, zeros)
if isinstance(value, UInt):
return uint(result)
elif isinstance(value, SInt):
return sint(result)
elif isinstance(value, Bits):
return bits(result)
return result | dfd666446f1b93ebdeeb94b932d8de7b243f6a4e | 23,494 |
import math
def _distance(point0, point1, point2, seg_len):
"""Compute distance between point0 and segment [point1, point2]. Based on Mark McClure's
PolylineEncoder.js."""
if (point1[0] == point2[0]) and (point1[1] == point2[1]):
out = _dist(point0, point2)
else:
uuu = ((point0[0] - point1[0]) * (point2[0] - point1[0]) +
(point0[1] - point1[1]) * (point2[1] - point1[1])) / seg_len
if uuu <= 0:
out = _dist(point0, point1)
elif uuu >= 1:
out = _dist(point0, point2)
else:
out = math.sqrt(math.pow((point0[0] - point1[0]) - (uuu * (point2[0] - point1[0])), 2) +
math.pow((point0[1] - point1[1]) - (uuu * (point2[1] - point1[1])), 2))
return out | 1927a5fe46dcb0245031b395aade67ec01270930 | 23,495 |
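A minimal sketch for exercising the helper above. It assumes `_dist` is plain Euclidean distance and that `seg_len` is the squared length of segment [point1, point2], as implied by the projection formula:

```python
import math

def _dist(p, q):
    """Assumed helper: Euclidean distance between two 2-D points."""
    return math.hypot(p[0] - q[0], p[1] - q[1])

# Point (1, 1) against the segment (0, 0)-(2, 0): squared length 4, projection
# parameter uuu = 0.5, so the perpendicular distance is 1.0.
print(_distance((1, 1), (0, 0), (2, 0), 4.0))  # -> 1.0
```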
def delete_node(
graph: xpb2.GraphProto,
node_name: str = "",
**kwargs):
""" Add node appends a node to graph g and returns the extended graph
Prints a message and returns False if fails.
Args:
graph: A graph, onnx.onnx_ml_pb2.GraphProto.
node_name: Name of the node to remove.
**kwargs
Returns:
The graph with the node removed.
"""
if type(graph) is not xpb2.GraphProto:
_print("The graph is not a valid ONNX graph.")
return False
if not node_name:
_print("Please specify a node name.")
return False
found = False
try:
for elem in graph.node:
if elem.name == node_name:
graph.node.remove(elem)
found = True
except Exception as e:
_print("Unable to iterate the nodes. " + str(e))
return False
if not found:
_print("Unable to find the node by name.")
return False
return graph | 620e325a0ea9da7cd83e897fee49fb6ef9183da4 | 23,496 |
from PIL import Image
def image_to_term256(pil_image):
"""Convert image to a string that resembles it when printed on a terminal
Needs a PIL image as input and a 256-color xterm for output.
"""
result = []
im = pil_image.convert('RGBA')
try:
from PIL import Image  # assumed: the original try body (missing here) imported PIL
except ImportError:
im.thumbnail((80, 80))
else:
im.thumbnail((80, 80), Image.ANTIALIAS)
width, height = im.size
for y in range(height // 2):
try:
for x in range(width):
result.append('\033[48;5;%dm\033[38;5;%dm' % (
term256color(*im.getpixel((x, y * 2))),
term256color(*im.getpixel((x, y * 2 + 1)))))
result.append('\N{LOWER HALF BLOCK}')
finally:
result.append('\033[0m\n')
return ''.join(result) | 482f6c868adf5f302d88898abeff426d9ed000e7 | 23,497 |
def false_discovery(alpha,beta,rho):
"""The false discovery rate.
The false discovery rate is the probability that an observed edge is
incorrectly identified, namely that is doesn't exist in the 'true' network.
This is one measure of how reliable the results are.
Parameters
----------
alpha : float
The estimate of the true-positive rate.
beta : float
The estimate of the false-positive rate.
rho : float
The estimate of network density.
Returns
-------
float
The false discovery rate (probability).
References
----------
.. [1] Newman, M.E.J. 2018. “Network structure from rich but noisy data.”
Nature Physics 14 6 (June 1): 542–545. doi:10.1038/s41567-018-0076-1.
"""
return (1-rho)*beta/(rho*alpha + (1-rho)*beta) | 849c236157070c5d1becfec3e4e5f46a63d232d2 | 23,498 |
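A quick worked example of the formula above:

```python
# With true-positive rate 0.9, false-positive rate 0.05 and density 0.1, roughly a
# third of the observed edges are expected to be spurious:
# (1 - 0.1) * 0.05 / (0.1 * 0.9 + 0.9 * 0.05) = 0.045 / 0.135 = 0.333...
print(false_discovery(alpha=0.9, beta=0.05, rho=0.1))  # -> 0.333...
```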
def add_default_legend(axes, subplots, traces):
"""
Add legend to the axes of the plot. This is needed to be done using matplotlib shapes
rather than the build in matplotlib legend because otherwise the animation will add
a legend at each time step rather than just once.
Parameters
----------
axes: axes object
the axes of the matplotlib figure
subplots: int
number of subplots in the figure
traces: list of dictionaries
a list of dictionaries where each dictionary corresponds to one of the passed in filenames or dataframes,
the keys of the dictionaries are subplots (0-indexed), and the values are a list of values for
that subplot from that filename (ex. traces = [{0: ["bg", "bg_sensor"], 1: ["iob"], 2: ["sbr"]}])
Returns
-------
axes: axes object
the axes with the legend items added to each subplot
"""
# Add the corresponding shape and label for each field in the plot to the legend
for subplot in range(subplots):
legend_items = []
for trace_dict in traces:
if subplot in trace_dict.keys():
for field in trace_dict[subplot]:
features = get_features_dictionary(field)
legend_items.append(
Line2D(
[0],
[0],
color=features["color"],
label=features["legend_label"],
marker=features["marker"],
markersize=3,
linestyle=features["linestyle"],
)
)
# Syntax is slightly different if there is only 1 subplot
if subplots < 2:
add_to = axes
else:
add_to = axes[subplot]
add_to.legend(handles=legend_items, loc="upper right")
# Return the updated axes
return axes | d352c1d90dac882f687be426d63dea35dca4ba46 | 23,499 |
def split_data(n_samps, percent_test):
"""
:param n_samps: number of data samples
:param percent_test: percent of data to hold out
:return: two sets of indices corresponding to training and validation data
"""
# generate and randomly shuffle
idx = np.arange(n_samps)
np.random.shuffle(idx)
# determine cut-point
i_cut = int(n_samps * (1 - percent_test))
# generate train and test indices
i_train = idx[:i_cut]
i_valid = idx[i_cut:]
return i_train, i_valid | 68d63d28b2aaab2697f2aab70fc7341a9a31811d | 23,501 |
def compute_totals(songs, limit_n, save_file=None):
"""
Return array of shape (4, 3, 35) representing counts for
each group of each context type of each label
"""
totals = np.zeros((4, 3, 35), dtype='int32')
i = 0
for song_path, beatmap_ids in songs:
print('song {}'.format(i))
spectrogram = np.load(song_path)
beatmap_data = [db.beatmap_data(beatmap_id) for beatmap_id in
beatmap_ids]
counts = get_counts(beatmap_data, spectrogram, limit_n=limit_n)
totals[:] = totals + counts
i += 1
if save_file:
np.save(save_file, totals)
return totals | d8e845912d6e1b5e0fab864e8a19cdc08500b4c5 | 23,502 |
def _initialize_arrays(initial_values,
num_steps):
"""Construct a structure of `TraceArray`s from initial values."""
trace_arrays = tf.nest.map_structure(
lambda t: tf.TensorArray( # pylint: disable=g-long-lambda
dtype=t.dtype,
size=num_steps, # Initial size.
clear_after_read=False, # Allow reading->tiling final value.
element_shape=t.shape),
initial_values)
return tf.nest.map_structure(
lambda ta, t: ta.write(0, t), trace_arrays, initial_values) | f63e13f35aade7979b4090964c593c2d222e94bd | 23,503 |
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8) | 5012d34ab9974e88bfc7dae4683521313fd37cd0 | 23,504 |
def start_of_next_clk_period(time: float, clk_period: float):
"""
:return: start time of next clk period
"""
return (start_clk(time, clk_period) + 1) * clk_period | d59dafc3a8fdec9d199dcf379eefce52267ea4c1 | 23,506 |
import re
def eval_formula(formula, assignment):
""" Evaluates a formula represented as a string.
**Attention**: Be extremely careful about what to pass to this function.
All parameters are plugged into the formula and evaluated using `eval()`
which executes arbitrary python code.
Parameters
----------
formula : str
String representation of the formula to be evaluated.
assignment : dict
Dictionary containing parameter names and values as keys and values,
respectively.
Returns
-------
float
Evaluation result.
Examples
--------
>>> eval_formula('a + (1 - b) * a', {'a': 0.1, 'b': 0.8})
0.12
"""
expression = formula
for param, value in sorted(assignment.items(), reverse=True):
expression = expression.replace(param, str(value))
# remove leading 0's
expression = re.sub(r'\d-0\d', lambda x: re.sub(r'-0', '-', x[0]), expression)
# pylint: disable=eval-used
return eval(expression)
# pylint: enable=eval-used | c1f344fc0049e20e86feb2428a46d51f9eee5898 | 23,507 |
def soil_temperature(jth: int, states: States, weather: Weather): # j = 1,2,..,5
"""
Equation 2.4 / 8.4
    cap_soil_j * d(soil_j_t)/dt = sensible_heat_flux_soil_j_minus_soil_j - sensible_heat_flux_soil_j_soil_j_plus
0 is Floor, 6 is SoOut
"""
h_soil_j_minus = Coefficients.Floor.floor_thickness if jth == 1 else Coefficients.Soil.soil_thicknesses[jth - 2]
h_soil_j = Coefficients.Soil.soil_thicknesses[jth - 1]
h_soil_j_plus = 1.28 if jth == 5 else Coefficients.Soil.soil_thicknesses[jth] # Assumed by GreenLight's authors, line 83, setGlParams
cap_soil_j = h_soil_j * Coefficients.Soil.rho_c_p_So
soil_heat_conductivity = Coefficients.Soil.soil_heat_conductivity
HEC_soil_j_minus_soil_j = 2 * soil_heat_conductivity / (h_soil_j_minus + h_soil_j)
HEC_soil_j_soil_j_plus = 2 * soil_heat_conductivity / (h_soil_j + h_soil_j_plus)
soil_j_minus_t = states.floor_t if jth == 1 else states.soil_j_t[jth - 2]
soil_j_t = states.soil_j_t[jth - 1]
soil_j_plus_t = weather.soil_out_t if jth == 5 else states.soil_j_t[jth]
sensible_heat_flux_soil_j_minus_soil_j = convective_and_conductive_heat_fluxes(HEC_soil_j_minus_soil_j, soil_j_minus_t, soil_j_t)
sensible_heat_flux_soil_j_soil_j_plus = convective_and_conductive_heat_fluxes(HEC_soil_j_soil_j_plus, soil_j_t, soil_j_plus_t)
return (sensible_heat_flux_soil_j_minus_soil_j - sensible_heat_flux_soil_j_soil_j_plus) / cap_soil_j | ddd3e50b30dc1240d5f6c6200aea710beba6b498 | 23,508 |
def clean_user_data(model_fields):
"""
Transforms the user data loaded from
LDAP into a form suitable for creating a user.
"""
# Create an unusable password for the user.
model_fields["password"] = make_password(None)
return model_fields | 9b9f968c4a775527dac36597ecadee476549dc7d | 23,509 |
import json
def case_structure_generator(path):
"""Create test cases from reference data files."""
with open(str(path), 'r') as in_f:
case_data = json.load(in_f)
system_dict = case_data['namelists']['SYSTEM']
ibrav = system_dict['ibrav']
if '-' in path.name:
_, qe_version_with_suffix = path.name.split('-')
qe_version, _ = qe_version_with_suffix.rsplit('.', 1)
else:
qe_version = None
ins = {'ibrav': ibrav, 'cell': case_data['cell'], 'qe_version': qe_version}
if ibrav == 0:
return ins, None, ValueError
outs = dict()
for key in (['a', 'b', 'c', 'cosab', 'cosac', 'cosbc'] +
['celldm({})'.format(i) for i in range(1, 7)]):
if key in system_dict:
outs[key] = system_dict[key]
return ins, outs, None | 1c7249c207032ed623bbfe274ed117283cd6ef4d | 23,510 |
from typing import Optional
from typing import Type
from typing import Dict
from typing import List
from typing import Any
def load_ascii(file: 'BinaryFile', # pylint: disable=unused-argument,keyword-arg-before-vararg
parser: 'Optional[Type[ASCIIParser]]' = None,
type_hook: 'Optional[Dict[str, Type[BaseType]]]' = None,
enum_namespaces: 'Optional[List[str]]' = None, bare: bool = False,
*args: 'Any', **kwargs: 'Any') -> 'ASCIIInfo':
"""Parse ASCII log file.
Args:
file: Log file object opened in binary mode.
parser (:class:`~zlogging.loader.ASCIIParser`, optional): Parser class.
type_hook (:obj:`dict` mapping :obj:`str` and :class:`~zlogging.types.BaseType` class, optional):
Bro/Zeek type parser hooks. User may customise subclasses of
:class:`~zlogging.types.BaseType` to modify parsing behaviours.
enum_namespaces (:obj:`List[str]`, optional): Namespaces to be loaded.
bare (:obj:`bool`, optional): If ``True``, do not load ``zeek`` namespace by default.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
The parsed ASCII log data.
"""
if parser is None:
parser = ASCIIParser
ascii_parser = parser(type_hook, enum_namespaces, bare)
return ascii_parser.parse_file(file) | fa7f0ba4a98dc295fb23651373a6489dad5c205e | 23,511 |
from itertools import combinations
def differences_dict(input_dict):
"""Create a dictionary of combinations of readers to create bar graphs"""
    # Getting the combinations of the formats
    comp_values = {}
    for each_case in input_dict.keys():
        comb = combinations(input_dict[each_case].keys(), 2)
        x = list(comb)
        comp_values[each_case] = {}
for each in x:
name = each[0].split("_")[0] + " vs " + each[1].split("_")[0]
comp_values[each_case][name] = {}
comp_values[each_case][name]["R0"] = []
comp_values[each_case][name]["X0"] = []
comp_values[each_case][name]["R1"] = []
comp_values[each_case][name]["X1"] = []
for (k, v), (k1, v1) in zip(
input_dict[each_case][each[0]].items(),
input_dict[each_case][each[1]].items(),
):
comp_values[each_case][name]["R0"].append(abs(v[0] - v1[0]))
comp_values[each_case][name]["X0"].append(abs(v[1] - v1[1]))
comp_values[each_case][name]["R1"].append(abs(v[2] - v1[2]))
comp_values[each_case][name]["X1"].append(abs(v[3] - v1[3]))
return comp_values | a15ef7bab8a9abaf556e1ce97a4c695b50d5b460 | 23,512 |
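A hedged usage sketch of differences_dict; the reader names and impedance values below are purely illustrative:
readings = {
    "case1": {
        "dss_reader": {"line1": [1.0, 2.0, 3.0, 4.0]},
        "glm_reader": {"line1": [1.1, 1.8, 3.3, 3.9]},
    }
}
diffs = differences_dict(readings)
# diffs["case1"]["dss vs glm"] == {"R0": [~0.1], "X0": [~0.2], "R1": [~0.3], "X1": [~0.1]}
# (each list holds the absolute per-field differences between the two readers)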
import psutil
def available_memory():
"""
Returns total system wide available memory in bytes
"""
return psutil.virtual_memory().available | 5071312f64aa37e1d777c8f20009fa38137381a4 | 23,513 |
from typing import List
from typing import Set
import numpy
def get_hypergraph_incidence_matrix(node_list: List[Node],
hyperedge_list: List[Set[Node]]
                                    ) -> numpy.ndarray:
"""Get the incidence matrix of a hypergraph"""
node_to_index = {node: index for index, node in enumerate(node_list)}
incidence_matrix = numpy.zeros((len(node_list), len(hyperedge_list)),
dtype=int)
for hyperedge_index, hyperedge in enumerate(hyperedge_list):
for node in hyperedge:
incidence_matrix[node_to_index[node], hyperedge_index] = 1
return incidence_matrix | 706bdd53a1fefec3ee3f77fa79248361ffff0351 | 23,515 |
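A small worked example; Node can be any hashable label, plain strings are used here for illustration:
nodes = ["a", "b", "c", "d"]
hyperedges = [{"a", "b"}, {"b", "c", "d"}]
M = get_hypergraph_incidence_matrix(nodes, hyperedges)
# M has shape (4, 2); M[i, j] == 1 iff node i belongs to hyperedge j:
# [[1 0]
#  [1 1]
#  [0 1]
#  [0 1]]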
# `X` is assumed to be a module-level HTML template string with two %s placeholders,
# defined elsewhere in the original module.
def fformat(last_data, last_records):
"""
@param last_data: dictionary(node_name => node's data segment)
@param last_records: dictionary(node_name => timestamp, node when
last transmitted)
@return: html
"""
    nodelist = list(last_data.keys())
    a = repr([str(x) for x in nodelist])
b = ''.join(['<div id="'+x+'" class="node"></div>' for x in nodelist])
return (X % (a, b)).encode('utf8') | 28da148b43c616652872cabc7815cba51dafd16c | 23,516 |
import math
def ceil(base):
"""Get the ceil of a number"""
return math.ceil(float(base)) | ebe78a5eb8fa47e6cfba48327ebb1bdc469b970d | 23,517 |
def train(
dir,
input_s3_dir,
output_s3_dir,
hyperparams_file,
ec2_type,
volume_size,
time_out,
docker_tag,
aws_role,
external_id,
base_job_name,
job_name,
use_spot_instances=False,
metric_names=None,
tags=None
):
"""
Trains ML model(s) on SageMaker
:param dir: [str], source root directory
:param input_s3_dir: [str], S3 location to input data
:param output_s3_dir: [str], S3 location to save output (models, etc)
:param hyperparams_file: [str], path to hyperparams json file
:param ec2_type: [str], ec2 instance type. Refer to:
https://aws.amazon.com/sagemaker/pricing/instance-types/
:param volume_size: [int], size in GB of the EBS volume
:param time_out: [int], time-out in seconds
:param docker_tag: [str], the Docker tag for the image
:param aws_role: [str], the AWS role assumed by SageMaker while training
:param external_id: [str], Optional external id used when using an IAM role
:param base_job_name: [str], Optional prefix for the SageMaker training job
:param job_name: [str], Optional name for the SageMaker training job. Overrides `base_job_name`
    :param use_spot_instances: [bool, default=False], Specifies whether to use SageMaker
Managed Spot instances for training.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html
(default: ``False``).
:param metric_names: [list[str], default=None], Optional list of string metric names
:param tags: [optional[list[dict]], default: None], List of tags for labeling a training
job. For more, see https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html. Example:
[
{
'Key': 'key_name_1',
'Value': key_value_1,
},
{
'Key': 'key_name_2',
'Value': key_value_2,
},
...
]
:return: [str], S3 model location
"""
config = _read_config(dir)
hyperparams_dict = _read_hyperparams_config(hyperparams_file) if hyperparams_file else None
sage_maker_client = sagemaker.SageMakerClient(config.aws_profile, config.aws_region, aws_role, external_id)
image_name = config.image_name+':'+docker_tag
return sage_maker_client.train(
image_name=image_name,
input_s3_data_location=input_s3_dir,
train_instance_count=1,
train_instance_type=ec2_type,
train_volume_size=volume_size,
train_max_run=time_out,
output_path=output_s3_dir,
hyperparameters=hyperparams_dict,
base_job_name=base_job_name,
job_name=job_name,
use_spot_instances=use_spot_instances,
tags=tags,
metric_names=metric_names
) | 7cd92363bcde8dc86989a8932236cd4b2961b0e3 | 23,519 |
def wikitext_page(d, e, title, fmt='wikitext'):
"""Create infobox with stats about a single page from a category.
Create infobox with stats about a single page from a category. Currently only supports formatting as wikitext.
Only returns the string of the text, does not save any files or modify other data structures."""
datum = d['stats']['scrape_start'][:10]
date_from = d['stats']['date_from']
date_to = d['stats']['date_to']
date_days = d['stats']['pv_days']
desc = f"Sidvisningsstatistik {datum} för tidsperioden {date_from}--{date_to} ({date_days} dagar)\n\n"
page_stats = d['pages'][title]['stats']
if fmt == 'wikitext':
text = f"{desc}\n\n\n"
table = table_start([colspan('Sidinformation')], [], cellpadding=3, cls='wikitable')
table += f"|Visningar || align='right' | {page_stats['pageviews_sv']}\n|-\n"
table += f"|Längd || align='right' | {page_stats['len_sv']}\n|-\n"
table += f"|Kvalitet || align='right' | {page_stats['quality']}\n|-\n"
if 'len_fi' in page_stats:
table += f"|Visningar Finska || align='right' | {page_stats['pageviews_fi']}\n|-\n"
table += f"|Längd Finska || align='right' | {page_stats['len_fi']}\n|-\n"
if 'len_en' in page_stats:
table += f"|Visningar Engelska || align='right' | {page_stats['pageviews_en']}\n|-\n"
table += f"|Längd Engelska || align='right' | {page_stats['len_en']}\n|-\n"
if 'len_de' in page_stats:
table += f"|Visningar Tyska || align='right' | {page_stats['pageviews_de']}\n|-\n"
table += f"|Längd Tyska || align='right' | {page_stats['len_de']}\n|-\n"
table += f"|Kategorier || align='right' | {page_stats['categories_cnt']}\n|-\n"
table += f"|Kontributörer || align='right' | {page_stats['contributors_tot']}\n|-\n"
table += f"|Antal andra språk || align='right' | {page_stats['langlinks_cnt']}\n|-\n"
table += f"|Externa länkar || align='right' | {page_stats['extlinks_cnt']}\n|-\n"
table += f"|Bilder || align='right' | {page_stats['images_cnt']}\n|-\n"
table += f"|Länkar || align='right' | {page_stats['links_cnt']}\n|-\n"
table += f"|Omdirigeringar || align='right' | {page_stats['redirects_cnt']}\n|-\n"
table += f"|Länkar till denna sida || align='right' | {page_stats['linkshere_cnt']}\n|-\n"
table += "|}\n\n"
text += table
text += """Kvalitet räknas ut med formeln:
Kvalitet =
3 * antalet kategorier +
4 * antalet bilder +
4 * antalet andra språk +
1 * antalet länkar +
1 * antalet länkar till denna sida +
2 * externa länkar +
3 * antalet omdirigeringar +
1 * antalet kontributörer
"""
return text
elif fmt == 'print':
text = f"Visningar---------------{page_stats['pageviews_sv']}\n"
text += f"Längd-------------------{page_stats['len_sv']}\n"
text += f"Kvalitet----------------{page_stats['quality']}\n"
if 'len_fi' in page_stats:
text += f"Visningar Finska--------{page_stats['pageviews_fi']}\n"
text += f"Längd Finska------------{page_stats['len_fi']}\n"
if 'len_en' in page_stats:
text += f"Visningar Engelska------{page_stats['pageviews_en']}\n"
text += f"Längd Engelska----------{page_stats['len_en']}\n"
if 'len_de' in page_stats:
text += f"Visningar Tyska---------{page_stats['pageviews_de']}\n"
text += f"Längd Tyska-------------{page_stats['len_de']}\n"
text += f"Kategorier--------------{page_stats['categories_cnt']}\n"
text += f"Kontributörer-----------{page_stats['contributors_tot']}\n"
text += f"Antal andra språk-------{page_stats['langlinks_cnt']}\n"
text += f"Externa länkar----------{page_stats['extlinks_cnt']}\n"
text += f"Bilder------------------{page_stats['images_cnt']}\n"
text += f"Länkar------------------{page_stats['links_cnt']}\n"
text += f"Omdirigeringar----------{page_stats['redirects_cnt']}\n"
text += f"Länkar till denna sida--{page_stats['linkshere_cnt']}\n"
return text | 1288a1fea5bc54ef6243089eea0578cc43ec311e | 23,520 |
from typing import Tuple
def _quadratic(
self: qp.utils.Minimize[Vector],
direction: Vector,
step_size_test: float,
state: qp.utils.MinimizeState[Vector],
) -> Tuple[float, float, bool]:
"""Take a quadratic step calculated from an energy-only test step.
Adjusts step size to back off if energy increases."""
# Check initial point:
step_size_prev = 0.0 # cumulative progress along direction
E = self._sync(float(state.energy))
E_orig = E
g_d = self._sync(state.gradient.overlap(direction))
if g_d >= 0.0:
qp.log.info(
f"{self.name}: Bad step direction with positive" " gradient component"
)
return E_orig, step_size_prev, False
# Test step and quadratic step size prediction:
for i_step in range(self.step_size.n_adjust):
# Check test step size:
if step_size_test < self.step_size.minimum:
qp.log.info(f"{self.name}: Test step size below threshold.")
return E, step_size_prev, False
# Try test step:
self.step(direction, step_size_test - step_size_prev)
step_size_prev = step_size_test
E_test = self._compute(state, energy_only=True) # gradient not needed
# Check if step left valid domain:
if not np.isfinite(E_test):
# Back off from difficult region
step_size_test *= self.step_size.reduce_factor
qp.log.info(
f"{self.name}: Test step failed with"
f" {state.energy.name} = {E_test:.3e};"
f" reducing test step size to {step_size_test:.3e}."
)
continue
# Predict step size (quadratic based on gradient and two energies):
step_size = (
0.5 * (step_size_test ** 2) * g_d / (step_size_test * g_d + E - E_test)
)
# Check reasonableness of predicted step:
if step_size < 0.0:
# Curvature has wrong sign, but E_test < E, so accept step
# for now and try descending further next time:
step_size_test *= self.step_size.grow_factor
qp.log.info(
f"{self.name}: Wrong curvature in test step,"
f" growing test step size to {step_size_test:.3e}."
)
E = self._compute(state, energy_only=False)
return E, step_size_prev, True
if step_size / step_size_test > self.step_size.grow_factor:
step_size_test *= self.step_size.grow_factor
qp.log.info(
f"{self.name}: Predicted step size growth"
f" > {self.step_size.grow_factor},"
f" growing test step size to {step_size_test:.3e}."
)
continue
if step_size / step_size_test < self.step_size.reduce_factor:
step_size_test *= self.step_size.reduce_factor
qp.log.info(
f"{self.name}: Predicted step size reduction"
f" < {self.step_size.reduce_factor},"
f" reducing test step size to {step_size_test:.3e}."
)
continue
# Successful test step:
break
if not np.isfinite(E_test):
qp.log.info(
f"{self.name}: Test step failed {self.step_size.n_adjust}"
" times. Quitting step."
)
return E_orig, step_size_prev, False
# Actual step:
for i_step in range(self.step_size.n_adjust):
# Try the step:
self.step(direction, step_size - step_size_prev)
step_size_prev = step_size
E = self._compute(state, energy_only=False)
if not np.isfinite(E):
step_size *= self.step_size.reduce_factor
qp.log.info(
f"{self.name}: Step failed with"
f" {state.energy.name} = {E:.3e};"
f" reducing step size to {step_size:.3e}."
)
continue
if E > E_orig + self.energy_threshold:
step_size *= self.step_size.reduce_factor
qp.log.info(
f"{self.name}: Step increased"
f" {state.energy.name} by {E - E_orig:.3e};"
f" reducing step size to {step_size:.3e}."
)
continue
# Step successful:
break
if (not np.isfinite(E)) or (E > E_orig + self.energy_threshold):
qp.log.info(
f"{self.name}: Step failed to reduce {state.energy.name}"
f" after {self.step_size.n_adjust} attempts."
" Quitting step."
)
return E_orig, step_size_prev, False
return E, step_size_prev, True | 49b2e75d0ae39e968e7288690dea7d42c423a2df | 23,521 |
def create_image(ds: "Dataset", data_element: "DataElement") -> "gdcm.Image":
"""Return a ``gdcm.Image``.
Parameters
----------
ds : dataset.Dataset
The :class:`~pydicom.dataset.Dataset` containing the Image
Pixel module.
data_element : gdcm.DataElement
The ``gdcm.DataElement`` *Pixel Data* element.
Returns
-------
gdcm.Image
"""
image = gdcm.Image()
number_of_frames = getattr(ds, 'NumberOfFrames', 1)
image.SetNumberOfDimensions(2 if number_of_frames == 1 else 3)
image.SetDimensions((ds.Columns, ds.Rows, number_of_frames))
image.SetDataElement(data_element)
pi_type = gdcm.PhotometricInterpretation.GetPIType(
ds.PhotometricInterpretation
)
image.SetPhotometricInterpretation(
gdcm.PhotometricInterpretation(pi_type)
)
tsyntax = ds.file_meta.TransferSyntaxUID
ts_type = gdcm.TransferSyntax.GetTSType(str.__str__(tsyntax))
image.SetTransferSyntax(gdcm.TransferSyntax(ts_type))
pixel_format = gdcm.PixelFormat(
ds.SamplesPerPixel,
ds.BitsAllocated,
ds.BitsStored,
ds.HighBit,
ds.PixelRepresentation
)
image.SetPixelFormat(pixel_format)
if 'PlanarConfiguration' in ds:
image.SetPlanarConfiguration(ds.PlanarConfiguration)
return image | 95f96b0f666903529811fbf3aaeb71305dfcb1bc | 23,522 |
def linear_interpolate_by_datetime(datetime_axis, y_axis, datetime_new_axis,
enable_warning=True):
"""A datetime-version that takes datetime object list as x_axis
"""
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return linear_interpolate(
numeric_datetime_axis, y_axis, numeric_datetime_new_axis,
enable_warning=enable_warning) | 515eb1e389b711ff3add707abe91bf577b38192d | 23,523 |
import pandas as pd
def calculate_index(
target_ts: pd.Timestamp, timestamps: pd.DatetimeIndex
) -> pd.Timestamp:
"""
Return the first index value after the target timestamp if the exact timestamp is not available
"""
# noinspection PyUnresolvedReferences
target_beyond_available = (target_ts > timestamps).all()
if target_beyond_available:
return timestamps[-1]
elif target_ts in timestamps:
return target_ts
else:
return timestamps[timestamps > target_ts][0] | db1ad3130b3763115cb88e8798618d9632996bd7 | 23,524 |
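For instance, with a small daily index (a usage sketch, not part of the original module):
import pandas as pd

ts = pd.date_range("2021-01-01", periods=5, freq="D")      # 2021-01-01 .. 2021-01-05
calculate_index(pd.Timestamp("2021-01-03"), ts)            # exact match  -> 2021-01-03
calculate_index(pd.Timestamp("2021-01-03 12:00"), ts)      # next index   -> 2021-01-04
calculate_index(pd.Timestamp("2021-02-01"), ts)            # beyond range -> 2021-01-05 (last)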
from typing import OrderedDict
import pydoc
def walk_through_package(package):
"""
Get the documentation for each of the modules in the package:
Args:
package: An imported python package.
Returns:
output: A dictionary with documentation strings for each module.
"""
output = OrderedDict()
modules = pydoc.inspect.getmembers(package, pydoc.inspect.ismodule)
for mod in modules:
module_name, reference = mod
output[module_name] = getmodule(module_name, reference)
return output | 9ad0e9d935a812608fb42af788c1ae6746b78684 | 23,525 |
import gzip
import numpy as np
def extract_images_2(f):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D unit8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = np.int64(_read32(bytestream))
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data | eb3b44051c6cc3721a82641346c233d9d4bfe1da | 23,526 |
from datetime import datetime
def slope_finder(station):
""" This function computes the slope of a least-squares fit of polynomial
of degree p to water level data and return that is it positive or negative"""
try:
dt = 2
dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))
slope = polyfit(dates,levels,1)
if slope[1] >= 0:
return True
else:
return False
    except Exception:
return None | 035a78a4e54b94945837e97c6dce53bc36770956 | 23,527 |
def get_attr_counts(datas, attr):
"""
不同属性值的数量.
:param datas:
:type datas: list[BaseDataSample]
:param attr:
:type attr: str
:return:
"""
results = {}
for data in datas:
value = data.get_value(attr)
if isinstance(value, list):
for v in value:
results.setdefault(attr + "-" + v, 0)
results[attr + "-" + v] += 1
else:
results.setdefault(value, 0)
results[value] += 1
return results | bea8e6e1c99efe1ad18894831006f0e218517c74 | 23,528 |
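BaseDataSample is project-specific; the stand-in class below is hypothetical and only mimics its get_value interface so the counting behaviour can be illustrated:
class _FakeSample:
    """Hypothetical stand-in for BaseDataSample exposing get_value(attr)."""
    def __init__(self, **attrs):
        self._attrs = attrs

    def get_value(self, attr):
        return self._attrs[attr]

samples = [
    _FakeSample(color="red", tags=["a", "b"]),
    _FakeSample(color="red", tags=["b"]),
    _FakeSample(color="blue", tags=["a"]),
]
get_attr_counts(samples, "color")   # {'red': 2, 'blue': 1}
get_attr_counts(samples, "tags")    # {'tags-a': 2, 'tags-b': 2}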
def split(string: str, separator: str = " ") -> list:
"""
Will split the string up into all the values separated by the separator (defaults to spaces)
>>> split("apple#banana#cherry#orange",separator='#')
['apple', 'banana', 'cherry', 'orange']
>>> split("Hello there")
['Hello', 'there']
>>> split("11/22/63",separator = '/')
['11', '22', '63']
>>> split("12:43:39",separator = ":")
['12', '43', '39']
"""
split_words = []
last_index = 0
for index, char in enumerate(string):
if char == separator:
split_words.append(string[last_index:index])
last_index = index + 1
elif index + 1 == len(string):
split_words.append(string[last_index : index + 1])
return split_words | 73e01d7ff9111d949f31f37b36c3b0656d06e340 | 23,529 |
import ast
import typing as t
"""Returns tuple containing index of classdef in the module and the ast.ClassDef object"""
for idx, definition in enumerate(target.body):
if isinstance(definition, ast.ClassDef) and definition.name == name:
return idx, definition | ad67d36772ef9541edb72a9d56f3553dc9eaffd2 | 23,530 |
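A quick usage sketch (note that the helper implicitly returns None when no matching class is found):
source = "x = 1\n\nclass Foo:\n    pass\n"
module = ast.parse(source)
idx, classdef = _find_class("Foo", module)
# idx == 1 (second top-level statement), classdef.name == "Foo"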
def tidy_osx_command_line_tools_command(client: TidyClient, **kwargs) -> DemistoResult:
""" Install OSX command line tools
Args:
client: Tidy client object.
**kwargs: command kwargs.
Returns:
DemistoResults: Demisto structured response.
"""
runner: Runner = client.osx_command_line_tools()
return parse_response(response=runner,
human_readable_name="OSx command line tools",
installed_software="command line tools",
additional_vars={}) | 0045848f0cef054dfab24a7698e4b3432843f747 | 23,532 |
def nav_entries(context):
"""
Renders dynamic nav bar entries from nav_registry for the provided user.
"""
context['nav_registry'] = nav_registry
return context | 8af1917c04a9cbd17895c0fab0239d6fd7c009d2 | 23,533 |
from typing import Any
import igraph as ig
def get_largest_component(graph: ig.Graph, **kwds: Any) -> ig.Graph:
"""Get largest component of a graph.
``**kwds`` are passed to :py:meth:`igraph.Graph.components`.
"""
vids = None
for component in graph.components(**kwds):
if vids is None or len(component) > len(vids):
vids = component
return graph.induced_subgraph(vids) | 24f04905c767f02a03b5a6fbf4ae0ba0b1f49269 | 23,534 |
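Assuming python-igraph is installed, a quick check of the helper:
g = ig.Graph(edges=[(0, 1), (1, 2), (3, 4)])   # two components: {0, 1, 2} and {3, 4}
largest = get_largest_component(g)
assert largest.vcount() == 3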
def hiring_contests():
"""Gets all the hiring challenges from all the availbale platforms"""
contests_data = get_contests_data()
active_contests = contests_data["active"]
upcoming_contests = contests_data["pending"]
get_challenge_name = lambda x : x.lower().split()
hiring_challenges = [contest for contest in active_contests
if "hiring" in get_challenge_name(contest["contest_name"])]
hiring_challenges += [contest for contest in upcoming_contests
if "hiring" in get_challenge_name(contest["contest_name"])]
return hiring_challenges | 91566f0117fc5bc38db7bc930d5e4c7bd1bd2992 | 23,535 |
import torch
def _find_quantized_op_num(model, white_list, op_count=0):
"""This is a helper function for `_fallback_quantizable_ops_recursively`
Args:
model (object): input model
white_list (list): list of quantizable op types in pytorch
op_count (int, optional): count the quantizable op quantity in this module
Returns:
the quantizable op quantity in this module
"""
quantize_op_num = op_count
for name_tmp, child_tmp in model.named_children():
if type(child_tmp) in white_list \
and not (isinstance(child_tmp, torch.quantization.QuantStub)
or isinstance(child_tmp, torch.quantization.DeQuantStub)):
quantize_op_num += 1
else:
quantize_op_num = _find_quantized_op_num(
child_tmp, white_list, quantize_op_num)
return quantize_op_num | c51b06e476ff4804d5bdfca5a187717536a0418f | 23,536 |
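A hedged sketch of how the counter walks a module tree; the tiny model and white list below are made up for illustration:
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 8, 3),
    nn.ReLU(),
    nn.Linear(8, 4),
)
white_list = [nn.Conv2d, nn.Linear]
_find_quantized_op_num(model, white_list)   # -> 2 (the Conv2d and the Linear)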
from unittest.mock import patch
def test_process_bulk_queue_errors(app, queue):
"""Test error handling during indexing."""
with app.app_context():
# Create a test record
r1 = Record.create({
'title': 'invalid', 'reffail': {'$ref': '#/invalid'}})
r2 = Record.create({
'title': 'valid', })
db.session.commit()
RecordIndexer().bulk_index([r1.id, r2.id])
ret = {}
def _mock_bulk(client, actions_iterator, **kwargs):
ret['actions'] = list(actions_iterator)
return len(ret['actions'])
with patch('invenio_indexer.api.bulk', _mock_bulk):
# Exceptions are caught
assert RecordIndexer().process_bulk_queue() == 1
assert len(ret['actions']) == 1
assert ret['actions'][0]['_id'] == str(r2.id) | 83c4609eb62d65fb7d53117906a0d6f128fe7b30 | 23,539 |
def list_to_string(the_list):
"""Converts list into one string."""
strings_of_list_items = [str(i) + ", " for i in the_list]
the_string = "".join(strings_of_list_items)
return the_string | f580dd8646526e64bb50297608e8ad8e338d9197 | 23,540 |
from typing import Optional
def get_rate_plan(apiproduct_id: Optional[str] = None,
organization_id: Optional[str] = None,
rateplan_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRatePlanResult:
"""
Gets the details of a rate plan.
"""
__args__ = dict()
__args__['apiproductId'] = apiproduct_id
__args__['organizationId'] = organization_id
__args__['rateplanId'] = rateplan_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:apigee/v1:getRatePlan', __args__, opts=opts, typ=GetRatePlanResult).value
return AwaitableGetRatePlanResult(
apiproduct=__ret__.apiproduct,
billing_period=__ret__.billing_period,
consumption_pricing_rates=__ret__.consumption_pricing_rates,
consumption_pricing_type=__ret__.consumption_pricing_type,
created_at=__ret__.created_at,
currency_code=__ret__.currency_code,
description=__ret__.description,
display_name=__ret__.display_name,
end_time=__ret__.end_time,
fixed_fee_frequency=__ret__.fixed_fee_frequency,
fixed_recurring_fee=__ret__.fixed_recurring_fee,
last_modified_at=__ret__.last_modified_at,
name=__ret__.name,
revenue_share_rates=__ret__.revenue_share_rates,
revenue_share_type=__ret__.revenue_share_type,
setup_fee=__ret__.setup_fee,
start_time=__ret__.start_time,
state=__ret__.state) | c439d2b991174b2fa4137d0b88f04af0ba4a22b9 | 23,542 |
import pandas as pd
def detail_blotter(backtest, positions, holdings, mode='simplified'):
"""
分品种获取详细交易状况,合并市场数据、交易情况和账户变动
参数:
backtest, positions, holdings为回测引擎返回的变量
mode: 'simplified'则市场行情数据只保留'close'列
(DataFrame的字典)
返回:
字典,键为symbol,值为DataFrame格式
示例:
blotter = detail_blotter(backtest, positions, holdings)
blotter_rb = blotter['RB']
blotter_br.head()
"""
blotter = dict()
data_dict = backtest.data_handler.latest_symbol_data
trades = backtest.trade_record()
trades['direction'] = [1 if d=='BUY' else -1 for d in trades['direction']]
trades['cost'] = trades['direction'] * trades['fill_price'] * trades['quantity']
for symb in data_dict.keys():
data = pd.DataFrame(data_dict[symb], columns=['symbol', 'datetime', 'open', 'high', 'low',
'close', 'volume'])
if mode == 'simplified':
data = data[['datetime', 'close']].set_index('datetime')
else: # 'full'
data = data.set_index('datetime')
trades_symb = trades[trades['symbol']==symb][['direction','fill_price', 'commission', 'cost']]
holdings_symb = pd.Series(holdings[symb], name='holdings')
positions_symb = pd.Series(positions[symb], name='positions')
merge = data.join([positions_symb, holdings_symb, trades_symb], how='outer').iloc[1:, :].fillna(0.)
        # P&L realised at the end of each bar
        merge['pnl'] = merge['holdings'] - merge['holdings'].shift(1) - merge['cost'].shift(1) - \
                       merge['commission'].shift(1)
        merge.iloc[0, merge.columns.get_loc('pnl')] = 0.  # first bar has no previous bar (NaN)
        # extra adjustment for a possible forced liquidation on the final bar of the backtest
        merge.iloc[-1, merge.columns.get_loc('pnl')] = merge['holdings'].iloc[-1] - merge['holdings'].iloc[-2] - \
                                                       merge['cost'].iloc[-1] - merge['commission'].iloc[-1]
        # use the close of the first bar of the backtest as the starting capital
merge['adj_total'] = merge['pnl'].cumsum() + merge['close'].iloc[0]
del merge['cost']
blotter[symb] = merge
return blotter | 9a2d8168cfc9ee979be847c6dac783a70503503c | 23,543 |