content | sha1 | id |
---|---|---|
import bmesh
from bmesh.types import BMFace, BMVert
def make_flat_roof(bm, faces, thick, outset, **kwargs):
"""Create a basic flat roof
Args:
bm (bmesh.types.BMesh): bmesh from current edit mesh
faces (bmesh.types.BMFace): list of user selected faces
thick (float): Thickness of the roof
    outset (float): How much the roof overhangs
    **kwargs: Extra kwargs from RoofProperty
Returns:
list(bmesh.types.BMFace): Resulting top face
"""
ret = bmesh.ops.extrude_face_region(bm, geom=faces)
bmesh.ops.translate(bm, vec=(0, 0, thick), verts=filter_geom(ret["geom"], BMVert))
top_face = filter_geom(ret["geom"], BMFace)[-1]
link_faces = [f for e in top_face.edges for f in e.link_faces if f is not top_face]
bmesh.ops.inset_region(bm, faces=link_faces, depth=outset, use_even_offset=True)
bmesh.ops.recalc_face_normals(bm, faces=bm.faces)
bmesh.ops.delete(bm, geom=faces, context="FACES")
new_faces = list({f for e in top_face.edges for f in e.link_faces})
return bmesh.ops.dissolve_faces(bm, faces=new_faces).get("region") | bfb218cfac8ff8c2e2c4bf428aba50a85062dd6e | 11,884 |
from rest_framework import filters
def make_comma_separated_list_filter(filter_name, field_expression):
"""
Create a filter which uses a comma-separated list of values to filter the queryset.
:param str filter_name: the name of the query param to fetch values
:param str field_expression: the field expression to filter the queryset, like `categories__in`
"""
def filter_queryset(instance, request, queryset, view):
values = request.query_params.get(filter_name)
if not values:
return queryset
values = [v.strip() for v in values.split(",")]
return queryset.filter(**{field_expression: values})
attrs = {}
attrs.setdefault("filter_queryset", filter_queryset)
return type(str("CommaSeparatedIDListFilter"), (filters.BaseFilterBackend,), attrs) | 7f6088f14195a93dca6cc68b0d2c6d4840cc159c | 11,885 |
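A minimal usage sketch for the factory above (added for illustration, not part of the original snippet); the Post model, PostSerializer, and the categories__in lookup are assumed names:
# Hypothetical DRF view wiring; Post, PostSerializer and categories__in are illustrative only.
from rest_framework import generics

CategoryFilter = make_comma_separated_list_filter("categories", "categories__in")

class PostListView(generics.ListAPIView):
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    filter_backends = [CategoryFilter]
    # GET /posts/?categories=1,2,3 -> queryset.filter(categories__in=["1", "2", "3"])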
def get_dashboard(id_, token_info=None, user=None):
"""Get a single dashboard by ID
    :param id_: ID of test dashboard
    :type id_: str
:rtype: Dashboard
"""
dashboard = Dashboard.query.get(id_)
if not dashboard:
return "Dashboard not found", 404
if dashboard and dashboard.project and not project_has_user(dashboard.project, user):
return "Forbidden", 403
return dashboard.to_dict() | 8110177aac6457771881cc78a45528acd70b2ab3 | 11,886 |
from pathlib import Path
def get_recorder(execution_cmd, ml_names):
"""
The helper function for generating a recorder object
"""
if not execution_cmd.record_progress:
return DummyRecorder()
root_dir_path = Path(__file__).parent.parent
log_dir_path = root_dir_path.joinpath(
"games", execution_cmd.game_name, "log")
game_params_str = [str(p) for p in execution_cmd.game_params]
filename_prefix = (
"manual" if execution_cmd.game_mode == GameMode.MANUAL else "ml")
if game_params_str:
filename_prefix += "_" + "_".join(game_params_str)
return Recorder(ml_names, log_dir_path, filename_prefix) | b21e9fb9de5fc1e5852647196a52d5df14255d32 | 11,887 |
def get_customer_key():
""" Reutrn the key of the sample customer from file """
customer_file = open("sample_customer", "r")
customer_key = customer_file.readline().rstrip("\n")
customer_file.close()
return customer_key | 2b63c671aa6f8dd5fe6fbd9d58394e8c178901f5 | 11,889 |
def tau_tex(tex, tau0_):
"""
Eq. (15) Goldsmith et al. (2012)
"""
g = gu/gl
return tau0_*(1. - np.exp(-tstar/tex))/(1. + g*np.exp(-tstar/tex)) | 1dc5b0254c2b4cb9bf443651d2ffe4685543b67d | 11,890 |
from functools import wraps
def cached(f):
"""Decorator to cache result of property."""
@wraps(f)
def inner(self):
name = '_{}'.format(f.__name__)
if getattr(self, name, None) is None:
setattr(self, name, f(self))
return getattr(self, name)
return inner | 9c9e14f358337efe7a4a5cffe9b3a46b1065951c | 11,891 |
def thesaurus(*args, sort=False) -> dict:
"""Формирует словарь, в котором ключи — первые буквы слов,
а значения — списки, содержащие слова, начинающиеся с соответствующей буквы
:param *args: перечень слов
:param sort: признак необходимости сортировки словаря по алфавиту (True - сортировать, False - не сортировать)
:return: словарь слов по первым буквам"""
if sort:
args = sorted(list(args)) # Changed in version 3.7: Dictionary order is guaranteed to be insertion order
dict_out = {}
for word in args:
dict_value = dict_out.setdefault(word[0], list())
if word not in dict_value:
dict_value.append(word)
dict_out[word[0]] = dict_value
return dict_out | 2e02e4f98a85eaa19a9374d5dfba82dd855b9636 | 11,892 |
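A quick illustrative call (added, not from the original) showing the grouping by first letter:
words = ("banana", "apple", "avocado", "blueberry", "cherry")
print(thesaurus(*words, sort=True))
# {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry'], 'c': ['cherry']}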
def calculate_ranking(imbalanced_results):
"""Calculate the ranking of oversamplers for
any combination of datasets, classifiers and
metrics."""
wide_optimal = calculate_wide_optimal(imbalanced_results)
ranking_results = wide_optimal.apply(
lambda row: _return_row_ranking(
row[3:], SCORERS[row[2].replace(' ', '_').lower()]._sign
),
axis=1,
)
ranking = pd.concat([wide_optimal.iloc[:, :3], ranking_results], axis=1)
return ranking | 81c41848f618245661338c52f73bb80ab865b7df | 11,893 |
def extract鏡像翻訳(item):
"""
Parser for '鏡像翻訳'
"""
if 'anime' in str(item['tags']).lower():
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('sodachi fiasco', 'Orokamonogatari - Sodachi Fiasco', 'translated'),
('karen ogre', 'Wazamonogatari - Karen Ogre', 'translated'),
('shinobu mustard', 'Shinobumonogatari - Shinobu Mustard', 'translated'),
('tsubasa sleeping', 'Wazamonogatari - Tsubasa Sleeping', 'translated'),
('acerola bon appetit', 'Wazamonogatari - Acerola Bon Appetit', 'translated'),
('tsudzura human', 'Musubimonogatari - Tsudzura Human', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('jinrui saikyou no netsuai', 'Jinrui Saikyou no Netsuai', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 2e561a9d6b66a5b243996a6a8e530a8f46476292 | 11,894 |
def config(live_server, django_user_model):
"""Create a user and return an auth_token config matching that user."""
user = django_user_model.objects.create(
email='jathan@localhost', is_superuser=True, is_staff=True
)
data = {
'email': user.email,
'secret_key': user.secret_key,
'auth_method': 'auth_token',
'url': live_server.url + '/api',
# 'api_version': API_VERSION,
'api_version': '1.0', # Hard-coded.
}
return data | 031648b92a8347f8cc5e14213eda85c9ed73d3ee | 11,895 |
from typing import Dict
from typing import Union
from typing import List
from typing import Any
from typing import OrderedDict
def combine_data_by_key(
combined_outputs: Dict[str, Union[List[Any], Any]],
output: Dict[str, Union[List[Any], Any]],
) -> Dict[str, Union[List[Any], Any]]:
"""
Combine lists in two multimaps
Args:
combined_outputs: Initial multimap to combine, presumably already combined
output: New multimap to add to initial multimap
Returns:
Combined multimaps (does not modify initial or new data)
"""
combined_keys = combine_keys(combined_outputs, output)
return OrderedDict(
(key, combine_datas(combined_outputs.get(key, []), output.get(key, [])))
for key in combined_keys
) | 3311c204cd3bce79a9613bb212978e8178a4b05f | 11,896 |
def acceptCode(request):
"""Redeems a code to accept invitation cash"""
params = request.get_params(schemas.AcceptCodeSchema())
device = get_device(request)
customer = device.customer
access_token = get_wc_token(request, customer)
postParams = {
'code': params['code']
}
response = wc_contact(
request, 'POST', 'wallet/accept-code', params=postParams,
access_token=access_token).json()
if response.get('error'):
return { 'error': response.get('error')}
elif response.get('invalid'):
return { 'invalid': response.get('invalid')}
else:
return response | 8a0c1201eb1135789e42bee01d3f0ab8480963b6 | 11,899 |
from string import ascii_uppercase
from typing import List
def _find_available_share_drive_letter(share_ignores: List[str] = None) -> str:
"""Find an available drive letter for a share.
    This function iterates backwards through the ASCII uppercase letters and
    checks each one against the current net use drive mappings. Once it finds
an available drive letter, it passes that back to the caller. If an
available drive letter is not found, a RuntimeError is raised.
Args:
share_ignores (List[str]): A list of share letters to ignore.
Returns:
str: An available drive letter (i.e., 'Z:') for a network share.
Raises:
RuntimeError
"""
LOGGER.write('Looking for an available share letter.')
drive_mapping = _get_current_drive_mapping()
# Iterate backwards through letters to see if they've already been used.
available_letter = ''
for letter in reversed(ascii_uppercase):
        if share_ignores and letter in share_ignores:
continue
letter = f'{letter}:'
if letter not in drive_mapping:
available_letter = letter
break
if not available_letter:
raise RuntimeError('Unable to find a free drive letter to map to!')
return available_letter | 0b74d17660e2b7f2d2ca92b23b5e67dac9b58f61 | 11,900 |
def integrate(sde=None, *, q=None, sources=None, log=False, addaxis=False):
"""Decorator for Ito Stochastic Differential Equation (SDE)
integration.
Decorates a function representing the SDE or SDEs into the corresponding
``sdepy`` integrator.
Parameters
----------
sde : function
Function to be wrapped. Its signature and values should be
as expected for the ``sde`` method of the ``sdepy.SDE`` or
``sdepy.SDEs`` classes.
q : int
Number of equations. If ``None``, attempts a test evaluation
of ``sde`` to find out. ``q=0`` indicates a single equation.
sources : set
Stochasticity sources used in the equation. If ``None``,
attempts a test evaluation of ``sde`` to find out.
log : bool
Sets the ``log`` attribute for the wrapping class.
addaxis : bool
Sets the ``addaxis`` attribute for the wrapping class.
Returns
-------
A subclass of ``sdepy.SDE`` or ``sdepy.SDEs`` as appropriate,
and of ``sdepy.integrator``, with the given ``sde``
cast as its ``sde`` method.
Notes
-----
To prevent a test evaluation of ``sde``, explicitly provide
the intended ``q`` and ``sources`` as keyword arguments to ``integrate()``.
The test evaluation is attempted as ``sde()`` and, upon failure,
again as ``sde(1., 1.)``.
Examples
--------
>>> from sdepy import integrate
>>> @integrate
... def my_process(t, x, theta=1., k=1., sigma=1.):
... return {'dt': k*(theta - x), 'dw': sigma}
>>> P = my_process(x0=1, sigma=0.5, paths=100*1000, steps=100)
>>> x = P(timeline=(0., 0.5, 1.))
>>> x.shape
(3, 100000)
"""
if sde is None:
def decorator(sde):
return integrate(sde, q=q, sources=sources,
log=log, addaxis=addaxis)
return decorator
else:
SDE_class = _SDE_from_function(sde, q=q, sources=sources,
log=log, addaxis=addaxis)
class sde_integrator(SDE_class, integrator):
pass
return sde_integrator | 3914a3dffae1f148459cb72ec6f23547c2dbff97 | 11,901 |
import numpy as np
def calculate_statistical_inefficiency_runs(traj_l):
"""
Using fast autocorrelation calculation to estimate statistical inefficiency. This code wraps
a function from pymbar.
References
----------
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from
multiple equilibrium states. J. Chem. Phys. 129:124105, 2008
http://dx.doi.org/10.1063/1.2978177
[2] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
"""
    try:
        import pymbar
    except ImportError as err:
        err.args = (err.args[0] + "\n You need to install pymbar to use this function.",)
        raise
iinv = np.array([pymbar.timeseries.statisticalInefficiency_fft(tra) for tra in traj_l])
return (iinv - 1.0) / 2.0 | e146820d28258a8b6b5bc60420cbf56fdade328b | 11,902 |
import torch
def dataio_prep(hparams):
"""Creates the datasets and their data processing pipelines"""
# 1. define tokenizer and load it
modelpath = download_to_dir(hparams["tok_mdl_file"], hparams["save_folder"])
download_to_dir(hparams["tok_voc_file"], hparams["save_folder"])
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
)
tokenizer.sp.load(modelpath)
if (tokenizer.sp.eos_id() + 1) == (tokenizer.sp.bos_id() + 1) == 0 and not (
hparams["eos_index"]
== hparams["bos_index"]
== hparams["blank_index"]
== hparams["unk_index"]
== 0
):
raise ValueError(
"Desired indexes for special tokens do not agree "
"with loaded tokenizer special tokens !"
)
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes(hparams["input_type"])
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("words")
@sb.utils.data_pipeline.provides("tokens_bos", "tokens_eos", "tokens")
def text_pipeline(words):
tokens_list = tokenizer.sp.encode_as_ids(words)
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
# 4. Create datasets
data = {}
for dataset in ["train", "valid", "test"]:
data[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=hparams[f"{dataset}_annotation"],
replacements={"data_root": hparams["data_folder"]},
dynamic_items=[audio_pipeline, text_pipeline],
output_keys=["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
)
if dataset != "train":
data[dataset] = data[dataset].filtered_sorted(sort_key="length")
# Sort train dataset and ensure it doesn't get un-sorted
if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending":
data["train"] = data["train"].filtered_sorted(
sort_key="length", reverse=hparams["sorting"] == "descending",
)
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] != "random":
raise NotImplementedError(
"Sorting must be random, ascending, or descending"
)
return data, tokenizer | 0cb89799b58e0ba68781a538f17d525749ab4796 | 11,903 |
def get_superlative_type(question_normal):
"""What TV series was Mark Harmon the star of that ran the least amount of time on TV ?"""
result = 'argmax'
question_normal = question_normal.lower()
superlative_serialization_list = superlative_serialization(question=question_normal)
for element in superlative_serialization_list:
if element in ['argmax', 'argmin']:
result = element
break
return result | 8129fcf104dd49117d3ffb49f73752902c1a27f4 | 11,904 |
import caffe
from caffe import layers as L, params as P
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
"""
Define net spec for simple conv-pool-deconv pattern common to all
coordinate mapping tests.
"""
n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
# for upsampling kernel size is 2x stride
try:
deconv_ks = [s*2 for s in dstride]
    except TypeError:
        deconv_ks = dstride * 2
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
return n | 3ee824372d99ae12cb3318d2e17dbfa67abbd31c | 11,905 |
def note_favorite(note):
"""
get the status of the note as a favorite
returns True if the note is marked as a favorite
False otherwise
"""
if 'favorite' in note:
return note['favorite']
return False | 503f4e3abaab9d759070c725cdf783d62d7c05d2 | 11,906 |
import itertools
def _crossproduct(template: CheckListTemplate):
"""
Takes the output of editor.template and does the cross product of contexts and qas
"""
ret = []
ret_labels = []
for instance in template.data:
cs = instance["contexts"]
qas = instance["qas"]
d = list(itertools.product(cs, qas))
ret.append([(x[0], x[1][0]) for x in d])
ret_labels.append([x[1][1] for x in d])
template.data = ret
template.labels = ret_labels
return template | 6f2bfd9b3c1aa392179c377e1a87c0d2221f0a45 | 11,907 |
from functools import wraps
from concurrent.futures import Future
def thread(function):
"""Runs the decorated function within a concurrent thread,
taking care of the result and error management.
Decorated functions will return a concurrent.futures.Future object
once called.
"""
@wraps(function)
def wrapper(*args, **kwargs):
future = Future()
launch_thread(_function_handler, function, args, kwargs, future)
return future
return wrapper | dd3d7d15281a6821e7235695e6311f9aabf96ea9 | 11,908 |
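An illustrative use of the decorator above (added here; assumes the module-level helpers it references, launch_thread and _function_handler, are available):
import time

@thread
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

future = slow_add(2, 3)   # returns a concurrent.futures.Future immediately
print(future.result())    # blocks until the worker thread finishes -> 5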
def collapse_range(arg, value_delimiter=',', range_delimiter='-'):
"""
Collapses a list of values into a range set
:param arg: The list of values to collapse
:param value_delimiter: The delimiter that separates values
:param range_delimiter: The delimiter that separates a value range
:return: An array of collapsed string values
:rtype: list
"""
values = list()
expanded = arg.split(value_delimiter)
range_start = None
for v1, v2 in lookahead(expanded):
if v2:
v1 = int(v1)
v2 = int(v2)
            if (v1 + 1) == v2:
                if range_start is None:
                    range_start = v1
            elif range_start is not None:
item = '{}{}{}'.format(range_start, range_delimiter, v1)
values.extend([item])
range_start = None
else:
values.extend([v1])
        elif range_start is not None:
item = '{}{}{}'.format(range_start, range_delimiter, v1)
values.extend([item])
range_start = None
else:
values.extend([v1])
return [str(x) for x in values] | 5caeb8609ab83e52041cc83bbe53d9aa2316dd01 | 11,909 |
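An example call (added for illustration), assuming the lookahead helper used above yields (current, next-or-None) pairs over the expanded values:
print(collapse_range("1,2,3,5,7,8"))
# ['1-3', '5', '7-8']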
def compute_diag_mog_params(M=int(4), snr=3.):
"""Returns diagonal mixture of Gaussian target distribution settings for d=2
Args:
M: (Optional) Integer, number of components
snr: (Optional) Scaling of the means
"""
d = int(2)
weights = np.ones(M)
weights /= np.sum(weights)
# change this to set the means apart
means = np.zeros((M, d))
if M == 3:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.]])
if M == 4:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.]])
if M == 6:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.]])
if M == 8:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.], [2, 0.], [0, -2.]])
covs = np.ones(M)
# compute the expected value of E[||X-Y||^2] for X, Y iid from P
mean_sqdist = 0.
for i in range(M):
for j in range(M):
temp = npl.norm(means[i])**2 + npl.norm(means[j])**2 - 2 * np.dot(means[i], means[j])
temp += d*(covs[i]+ covs[j])
mean_sqdist += weights[i] * weights[j] * temp
params_p = {"name": "diag_mog",
"weights": weights,
"means": means,
"covs": covs,
"d": int(d),
"mean_sqdist" : mean_sqdist,
"saved_samples": False,
"flip_Pnmax": False
}
return(params_p) | 4247889dcf8bddc93e4433d685ec81fd59e591a7 | 11,910 |
def has_ifm2(npu_op: NpuBlockOperation) -> bool:
"""Checks if op has non-scalar IFM2"""
return npu_op.ifm2 is not None and npu_op.ifm2_scalar is None | b51092fa486979fbd53d0b1c70ac4390f22df87f | 11,911 |
def ilsvrc_fix_args(args):
"""
    Update the args with the fixed ILSVRC parameters
    """
    args.ds_name = "ilsvrc"
    args.num_classes = 1000
# GPU will handle mean std transformation to save CPU-GPU communication
args.do_mean_std_gpu_process = True
args.input_type = 'uint8'
args.mean = get_augmented_data.ilsvrc_mean
args.std = get_augmented_data.ilsvrc_std
#assert args.do_mean_std_gpu_process and args.input_type == 'uint8'
#assert args.mean is not None and args.std is not None
decay_power = args.batch_size / float(ILSVRC_DEFAULT_BATCH_SIZE)
args.batch_norm_decay=0.9**decay_power # according to Torch blog
return args | 64e956a78fc4e64040efa42fd5b6215642430d69 | 11,912 |
def get_mock_adapter() -> Adapter:
"""Get a requests-mock Adapter with some URLs mocked by default"""
adapter = Adapter()
adapter.register_uri(
ANY_METHOD,
MOCKED_URL,
headers={'Content-Type': 'text/plain'},
text='mock response',
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_HTTPS,
headers={'Content-Type': 'text/plain'},
text='mock https response',
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_JSON,
headers={'Content-Type': 'application/json'},
json={'message': 'mock json response'},
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_REDIRECT,
headers={'Content-Type': 'text/plain', 'Location': MOCKED_URL_REDIRECT_TARGET},
text='mock redirect response',
status_code=302,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_REDIRECT_TARGET,
headers={'Content-Type': 'text/plain'},
text='mock redirected response',
status_code=200,
)
return adapter | bede0a72fa8336663d81eeb352a9191280f9f2d6 | 11,913 |
def eqtls_weights_summing(eqtl_occurrence_log_likelihood, ens_gene_id, target_species_hit, converted_eqtls, gtex_weights_dict, chr_start, chr_end, gtex_variants, tf_len, gene_len):
"""
Identify if any of the eQTLs associated with this gene overlap this predicted TFBS.
Retrieve the log-likelihood scores for all of them.
Fix.
"""
eqtl_weights = []
if len(converted_eqtls) > 0:
# determine the weight score for likelihood of this magnitude eQTL.
# ref-point
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
for converted_eqtl in converted_eqtls:
converted_eqtl_start = converted_eqtl[0]
converted_eqtl_end = converted_eqtl[1]
converted_eqtl_score_mag = abs(converted_eqtl[2])
overlap = overlap_range([motif_start, motif_end], [converted_eqtl_start, converted_eqtl_end])
if len(overlap) > 0:
eqtl_weight = gtex_weights_dict[converted_eqtl_score_mag]
eqtl_weights.append(eqtl_weight + eqtl_occurrence_log_likelihood)
eqtl_weights_sum = sum(eqtl_weights)
return eqtl_weights_sum | 1ac37a4eed54c282e8f086b50ffc96946a1bdb29 | 11,914 |
def get_file_chunks_in_range(context, filediff, interfilediff,
first_line, num_lines):
"""
A generator that yields chunks within a range of lines in the specified
filediff/interfilediff.
This is primarily intended for use with templates. It takes a
RequestContext for looking up the user and for caching file lists,
in order to improve performance and reduce lookup times for files that have
already been fetched.
Each returned chunk is a dictionary with the following fields:
============= ========================================================
Variable Description
============= ========================================================
``change`` The change type ("equal", "replace", "insert", "delete")
``numlines`` The number of lines in the chunk.
``lines`` The list of lines in the chunk.
``meta`` A dictionary containing metadata on the chunk
============= ========================================================
Each line in the list of lines is an array with the following data:
======== =============================================================
Index Description
======== =============================================================
0 Virtual line number (union of the original and patched files)
1 Real line number in the original file
2 HTML markup of the original file
3 Changed regions of the original line (for "replace" chunks)
4 Real line number in the patched file
5 HTML markup of the patched file
6 Changed regions of the patched line (for "replace" chunks)
7 True if line consists of only whitespace changes
======== =============================================================
"""
def find_header(headers):
for header in reversed(headers):
if header[0] < first_line:
return {
'line': header[0],
'text': header[1],
}
interdiffset = None
key = "_diff_files_%s_%s" % (filediff.diffset.id, filediff.id)
if interfilediff:
key += "_%s" % (interfilediff.id)
interdiffset = interfilediff.diffset
if key in context:
files = context[key]
else:
assert 'user' in context
request = context.get('request', None)
files = get_diff_files(filediff.diffset, filediff, interdiffset,
request=request)
populate_diff_chunks(files, get_enable_highlighting(context['user']),
request=request)
context[key] = files
    if not files:
        return
assert len(files) == 1
last_header = [None, None]
for chunk in files[0]['chunks']:
if ('headers' in chunk['meta'] and
(chunk['meta']['headers'][0] or chunk['meta']['headers'][1])):
last_header = chunk['meta']['headers']
lines = chunk['lines']
if lines[-1][0] >= first_line >= lines[0][0]:
start_index = first_line - lines[0][0]
if first_line + num_lines <= lines[-1][0]:
last_index = start_index + num_lines
else:
last_index = len(lines)
new_chunk = {
'lines': chunk['lines'][start_index:last_index],
'numlines': last_index - start_index,
'change': chunk['change'],
'meta': chunk.get('meta', {}),
}
if 'left_headers' in chunk['meta']:
left_header = find_header(chunk['meta']['left_headers'])
right_header = find_header(chunk['meta']['right_headers'])
del new_chunk['meta']['left_headers']
del new_chunk['meta']['right_headers']
if left_header or right_header:
header = (left_header, right_header)
else:
header = last_header
new_chunk['meta']['headers'] = header
yield new_chunk
first_line += new_chunk['numlines']
num_lines -= new_chunk['numlines']
assert num_lines >= 0
if num_lines == 0:
break | 85e96d7c1a0c09880c865e1ea9f5e3eb29dca122 | 11,915 |
def parse_metric(y_train, goal):
"""
Parse the metric to the dictionary
"""
y_array = np.array(y_train, dtype=np.float64)
if goal == api_pb2.MINIMIZE:
y_array *= -1
return y_array | 164518c4ba84e0fef450ec9e4196ec90de269fd3 | 11,917 |
import math
def erfc(x):
"""Complementary error function (via `http://bit.ly/zOLqbc`_)"""
z = abs(x)
t = 1. / (1. + z / 2.)
r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (
0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (
-0.82215223 + t * 0.17087277
)))
)))
)))
return 2. - r if x < 0 else r | fd2a44142042e81ef1fc5f649186a41ae4a152b0 | 11,918 |
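A small self-check (added here, not in the original) comparing the approximation against the standard library:
for v in (-1.5, 0.0, 0.5, 2.0):
    assert abs(erfc(v) - math.erfc(v)) < 1e-6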
import urllib
import logging
import time
import json
def request_until_success(url, max_attempts=5, wait=5):
"""Makes a request a few times in case of a 500 error.
Should use exponential backoff?
"""
req = urllib.request.Request(url)
success = False
num_tries = 0
while not success:
try:
num_tries += 1
response = urllib.request.urlopen(req)
success = response.getcode() == 200
except urllib.request.HTTPError as e:
logging.error(e)
logging.error("Error on url {}".format(url))
if e.code == 500 and num_tries < max_attempts:
logging.error("trying again soon")
time.sleep(wait)
else:
logging.error(e.reason)
raise e
return json.loads(response.read().decode('UTF-8')) | 639371539df5daabffaf3c3978c8677cbf2f8b4e | 11,919 |
from typing import Optional
def uuid(name, value) -> "Optional[str]":
"""Validate that the value is a UUID
Args:
name (str): Name of the argument
value (any): A UUID string value
Returns:
The value, or None if value is None
Raises:
InvalidParameterValue: if the value is not a valid UUID
"""
if value is None:
return
if not uuidutils.is_uuid_like(value):
raise InvalidParameterValue(f"Expected UUID for {name}: {value}")
return value | 3e6ca1211c9ebbba5889917ee252d21aebaac74e | 11,920 |
import re
def expandall(text):
"""
Search for abbreviations in text using re_abbr (defined in utils.get_res).
For each abbreviation, find likely full term. Replace each instance of the
abbreviation in the text with the full term.
Parameters
----------
text : str
Text to search for abbreviations.
Returns
-------
text: str
Text with expanded abbreviations.
Examples
----------
>>> text = 'This is a test string (TS). I hope it is informative (inf).'
>>> expanded = expandall(text)
>>> print(expanded)
This is a test string (test string). I hope it is informative (informative).
"""
re_abbr, _ = get_res()
f = re.finditer(re_abbr, text)
for match in f:
if match is not None:
abb = str(match.group(1))
# Very long abbreviations will break regex.
if len(abb) < 9:
abR = make_abbr_regex(match)
fullterm = re.search(abR, text)
if fullterm is not None:
index = fullterm.group(0).find(' (')
fullterm = str(fullterm.group(0)[:index]).strip()
text = replace(text, abb, fullterm)
else:
logger.info('No full term detected for '
'abbreviation {0}'.format(abb))
else:
logger.warning('Abbreviation detection regex returned None.')
return text | 0c229ec32ef5d9315c39eff6f4a8fad427ccdb07 | 11,921 |
def get_source_fields(client, source_table):
"""
Gets column names of a table in bigquery
:param client: BigQuery client
:param source_table: fully qualified table name.
returns as a list of column names.
"""
return [f'{field.name}' for field in client.get_table(source_table).schema] | abc161f252c03647a99a6d2151c00288b176a4e7 | 11,922 |
def has_user_based_permission(obj, user, allow_superuser=True, allow_staff=False):
"""
Based on obj.get_user(), checks if provided user is that user.
Accounts for superusers and staff.
"""
if hasattr(obj, "get_user"):
obj_user = obj.get_user()
# User is logged in
if user.is_authenticated:
# If staff or superuser or share a common group, then yes.
if (allow_staff and user.is_staff) \
or (allow_superuser and user.is_superuser) \
or obj_user == user:
return True
return False | bcedf697280a75575e9d0202d1a6a65161a873ad | 11,923 |
from contextvars import ContextVar
from typing import Optional
from typing import Any
def trace_stack_top(trace_stack_var: ContextVar) -> Optional[Any]:
"""Return the element at the top of a trace stack."""
trace_stack = trace_stack_var.get()
return trace_stack[-1] if trace_stack else None | 4258a4247a8b40e5cf61a39e94bb30fed936b1de | 11,924 |
def fbconnect():
"""This allows users to use facebook account to sign in."""
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_id"]
app_secret = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_secret"]
url = ("https://graph.facebook.com/v2.8/oauth/access_token?"
"grant_type=fb_exchange_token&client_id=%s&client_secret=%s"
"&fb_exchange_token=%s") % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
token = data["access_token"]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.8/me"
url = userinfo_url + "?access_token=%s&fields=name,id,email" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
    print(data)
login_session["provider"] = "facebook"
login_session["username"] = data["name"]
login_session["email"] = data["email"]
login_session["facebook_id"] = data["id"]
login_session["access_token"] = token
# Get user picture
url = userinfo_url + \
"/picture?access_token=%s&redirect=0&height=200&width=200" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session["picture"] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("Now logged in as %s" % login_session["username"])
return output | a4a1ec728ce6bfc7a9c8f3fff02896f63eed6dea | 11,925 |
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task | 8b8b3c4abdb8ae75fcfb2907010965e36bd1dfe5 | 11,926 |
def naiveMP(tsA, m, tsB=None):
"""
Calculate the Matrix Profile using the naive all-pairs calculation.
Parameters
----------
tsA: Time series containing the queries for which to calculate the Matrix Profile.
m: Length of subsequence to compare.
tsB: Time series to compare the query against. Note that, if no value is provided, tsB = tsA by default.
"""
return _matrixProfile(tsA, m, order.linearOrder, distanceProfile.naiveDistanceProfile, tsB) | 9d38e4384d3ad8581862df388c32ea17bd02734f | 11,927 |
def render_html(data):
"""
"""
data.setdefault('domain', DOMAIN)
template = '''
<table border="1" cellspacing="0" cellpadding="0">
<tr><td>类型</td><td>{type}</td></tr>
<tr><td>团队</td><td>{team}</td></tr>
<tr><td>项目</td><td>{project}</td></tr>
<tr><td>名称</td><td>{name}</td></tr>
<tr><td>接口</td><td>{interface[total]}个</td></tr>
<tr><td>断言</td><td>{interface[verify]}个</td></tr>
<tr><td>成功率</td><td>{interface[percent]}%</td></tr>
<tr><td>开始时间</td><td>{start}</td></tr>
<tr><td>结束时间</td><td>{end}</td></tr>
<tr><td>报告地址</td><td><a href="{domain}/report/detail?id={id}">测试报告-{id}</a></td></tr>
</table>
'''.format(**data)
return template | 92371e4e7589853fef167ea12f2e0461e39fcae4 | 11,929 |
from operator import invert
def segment_cells(image, max_cell_size):
"""Return segmented cells."""
image = identity(image)
wall = threshold_adaptive_median(image, block_size=101)
seeds = remove_small_objects(wall, min_size=100)
seeds = dilate_binary(seeds)
seeds = invert(seeds)
seeds = remove_small_objects(seeds, min_size=5)
seeds = connected_components(seeds, background=0)
segmentation = watershed_with_seeds(-image, seeds=seeds)
segmentation = remove_large_segments(segmentation, max_cell_size)
return segmentation, wall | 5479b04595b10e903e56b2a546c44e583b324c94 | 11,930 |
import string
def encrypt(message, key):
"""
>>> encrypt("Hello world",12)
'Tqxxa iadxp'
>>> encrypt("We are Penn State!!!",6)
'Ck gxk Vktt Yzgzk!!!'
>>> encrypt("We are Penn State!!!",5)
'Bj fwj Ujss Xyfyj!!!'
>>> encrypt(5.6,3)
'error'
>>> encrypt('Hello',3.5)
'error'
>>> encrypt(5.6,3.15)
'error'
"""
    # --- YOUR CODE STARTS HERE
# decide whether it is the right data type
if type(message) == str and type(key) == int:
# define a list that have the ascii number of character
words = string.ascii_letters
# use the for loop to transfer characters with keys
lowerchr = [chr((i - 97) % 26 + 97) for i in range(97 + key, 123 + key)]
capchr = [chr((i - 65) % 26 + 65) for i in range(65 + key, 91 + key)]
# join the lower and upper characters together
asc = ''.join(lowerchr) + ''.join(capchr)
# use the translate and maketrans function to transfer the ascii code to string
return message.translate(str.maketrans(words, asc))
# if the value type is not correct return "error"
return "error" | 991449aac78fba9a348a1c3b1d1d7b1f14faff11 | 11,931 |
def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
:alias_main: paddle.std
:alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std
Computes the standard-deviation of the input Variable's elements along the specified
axis.
Args:
input (Variable): The input Variable to be computed standard-deviation, with data
type float32 and float64 supported.
axis (list|int, optional): The axis along which the standard-deviation is computed.
If `None`, compute the standard-deviation over all elements of :attr:`input`
and return a Variable with a single element, otherwise it must be in
the range :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
the axis to compute is :math:`rank(input) + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimensions in
the output Variable. The dimensions in :attr:`axis` will be squeezed
and the result Variable will have :attr:`len(axis)` fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default False.
unbiased (bool, optional): Whether to compute standard-deviation via the unbiased
estimator, in which the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
out (Variable, optional): Alternate output Variable to store the result
standard-deviation . Default None.
name (str, optional): The name for this layer. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default None.
Returns:
Variable: The result standard-deviation with the same dtype as :attr:`input`.
If :attr:`out = None`, returns a new Variable containing the
standard-deviation , otherwise returns a reference to the output Variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
paddle.std(x) # [0.28252685]
paddle.std(x, axis=[0]) # [0.0707107, 0.07071075, 0.07071064, 0.1414217]
paddle.std(x, axis=[-1]) # [0.30956957, 0.29439208]
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'std')
tmp = var(input, axis=axis, keepdim=keepdim, unbiased=unbiased, name=name)
tmp = layers.sqrt(tmp)
if out is not None:
layers.assign(input=tmp, output=out)
return out
else:
return tmp | b2710442ccf0377dd1d84521f5666c8253390e35 | 11,933 |
def check_file_behaviour(file_hash):
"""
Returns the file execution report.
"""
params = {
'hash': file_hash
}
api_endpoint = 'file/behaviour'
return http_request('GET', api_endpoint, params, DEFAULT_HEADERS) | 87bb4539e948683cfea05f75741d26e1063073d9 | 11,935 |
def is_access_group(outter_key, inner_key) -> dict:
    """Returns the access-group values."""
values = {}
if outter_key == "access-group":
if inner_key.get('index') is not None:
values['index'] = ', '.join(is_instance(inner_key.get('index', {})))
elif inner_key.get('name') is not None:
values['name'] = ', '.join(is_instance(inner_key.get('name', {})))
return values | 96ef5ab1f0b48f7a43b4ebd96d728ff7ad552964 | 11,936 |
def lidar_to_cam_frame(xyz_lidar, frame_calib):
"""Transforms points in lidar frame to the reference camera (cam 0) frame
Args:
xyz_lidar: points in lidar frame
frame_calib: FrameCalib frame calibration
Returns:
ret_xyz: (N, 3) points in reference camera (cam 0) frame
"""
# Pad the r0_rect matrix to a 4x4
r0_rect_mat = frame_calib.r0_rect
r0_rect_mat = np.pad(r0_rect_mat, ((0, 1), (0, 1)),
'constant', constant_values=0)
r0_rect_mat[3, 3] = 1
# Pad the vel_to_cam matrix to a 4x4
tf_mat = frame_calib.velo_to_cam
tf_mat = np.pad(tf_mat, ((0, 1), (0, 0)),
'constant', constant_values=0)
tf_mat[3, 3] = 1
# Pad the point cloud with 1's for the transformation matrix multiplication
one_pad = np.ones(xyz_lidar.shape[0]).reshape(-1, 1)
xyz_lidar = np.append(xyz_lidar, one_pad, axis=1)
# p_cam = P2 * (R0_rect * Tr_velo_to_cam * p_velo)
rectified = np.dot(r0_rect_mat, tf_mat)
ret_xyz = np.dot(rectified, xyz_lidar.T)
# Return (N, 3) points
return ret_xyz[0:3].T | 6b42a52ccca1101cd0f64ed6fadbcdc974a67b0f | 11,937 |
import torch
from torch import Tensor
def hann_sinc_low_pass(x: Tensor, N: int, fs: int, fc: float) -> Tensor:
"""Hann windowed ideal low pass filter.
Args:
x: [n_batch, 1, n_sample]
N: the window will be [-N, N], totally 2N+1 samples.
Returns:
y: [n_batch, 1, n_sample]
"""
w = continuous_hann_sinc_filter(fs, fc, 2*N+1, x.dtype, x.device)
w = (w / w.sum()).view(1, 1, -1)
return torch.nn.functional.conv1d(x, w, padding=N) | 93ba44fd351d7c53e151c812cc6458433c916216 | 11,939 |
import copy
def ask_for_missing_options(arguments: CommandLineArguments, root: tk.Tk) -> ProgramOptions:
"""
Complete the missing information by askin the user interactively.
"""
values = copy.deepcopy(arguments)
if values.source_directory is None:
values.source_directory = insist_for_directory(
"Ordner mit allen Bildern auswaehlen.",
"Quellverzeichnis muss ausgewaehlt sein.",
root,
)
if len(values.target_directories) == 0:
values.target_directories.append(
insist_for_directory(
"Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
"Mindestens ein Zielverzeichnis muss ausgewaehlt sein.",
root,
)
)
is_more_to_add = tkmb.askyesno(message="Ein weiteres Zielverzeichnis angeben?")
while is_more_to_add:
possible_directory = ask_for_directory(
"Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
root,
)
if possible_directory is None:
tkmb.showwarning(message="Kein Verzeichnis gewaehlt!")
else:
values.target_directories.append(possible_directory)
is_more_to_add = tkmb.askyesno(message="Noch ein weiteres Zielverzeichnis angeben?")
program_options = ProgramOptions(
values.source_directory,
values.target_directories,
)
return program_options | 6be882199bee5abaef9b3362522b508e90222150 | 11,940 |
import math
import numpy as np
def matrix2quaternion(m):
"""Returns quaternion of given rotation matrix.
Parameters
----------
m : list or numpy.ndarray
3x3 rotation matrix
Returns
-------
quaternion : numpy.ndarray
quaternion [w, x, y, z] order
Examples
--------
>>> import numpy
>>> from skrobot.coordinates.math import matrix2quaternion
>>> matrix2quaternion(np.eye(3))
array([1., 0., 0., 0.])
"""
m = np.array(m, dtype=np.float64)
tr = m[0, 0] + m[1, 1] + m[2, 2]
if tr > 0:
S = math.sqrt(tr + 1.0) * 2
qw = 0.25 * S
qx = (m[2, 1] - m[1, 2]) / S
qy = (m[0, 2] - m[2, 0]) / S
qz = (m[1, 0] - m[0, 1]) / S
elif (m[0, 0] > m[1, 1]) and (m[0, 0] > m[2, 2]):
S = math.sqrt(1. + m[0, 0] - m[1, 1] - m[2, 2]) * 2
qw = (m[2, 1] - m[1, 2]) / S
qx = 0.25 * S
qy = (m[0, 1] + m[1, 0]) / S
qz = (m[0, 2] + m[2, 0]) / S
elif m[1, 1] > m[2, 2]:
S = math.sqrt(1. + m[1, 1] - m[0, 0] - m[2, 2]) * 2
qw = (m[0, 2] - m[2, 0]) / S
qx = (m[0, 1] + m[1, 0]) / S
qy = 0.25 * S
qz = (m[1, 2] + m[2, 1]) / S
else:
S = math.sqrt(1. + m[2, 2] - m[0, 0] - m[1, 1]) * 2
qw = (m[1, 0] - m[0, 1]) / S
qx = (m[0, 2] + m[2, 0]) / S
qy = (m[1, 2] + m[2, 1]) / S
qz = 0.25 * S
return np.array([qw, qx, qy, qz]) | 97a7a9c9c7a92bc2269c9ec9fee4e9e462168e50 | 11,941 |
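A spot check beyond the identity example in the docstring (a 90-degree rotation about z, added for illustration):
rz90 = np.array([[0., -1., 0.],
                 [1.,  0., 0.],
                 [0.,  0., 1.]])
print(matrix2quaternion(rz90))  # approximately [0.7071, 0., 0., 0.7071]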
def paging_forward(data_func, *args):
"""
Создает кнопку вперед для переключения страницы списка
:param data_func: func from UI.buttons. Действие, которое будет возвращать кнопка
:return: InlineKeyboardButton
"""
g_data = loads(data_func(*args).callback_data)
g_data['page'] += 1
text = '>>'
return InlineKeyboardButton(text, callback_data=dumps(g_data)) | 4e1ef26c005e47422b86658814cb3336b51d296f | 11,942 |
from threading import RLock
def synchronized(func):
"""Synchronizes method invocation on an object using the method name as the mutex"""
def wrapper(self,*__args,**__kw):
try:
            rlock = getattr(self, '_sync_lock_%s' % func.__name__)
#rlock = self._sync_lock
except AttributeError:
rlock = self.__dict__.setdefault('_sync_lock_%s' % func.__name__, RLock())
rlock.acquire()
try:
return func(self,*__args,**__kw)
finally:
rlock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper | 8db95217e8e5e37d0e7457c0808163fd6ddc007f | 11,943 |
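An illustrative class using the decorator above; each instance lazily gets one re-entrant lock per decorated method name:
class Counter:
    def __init__(self):
        self.value = 0

    @synchronized
    def increment(self):
        self.value += 1

c = Counter()
c.increment()
print(c.value)  # 1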
def vector(location_1, location_2):
"""
Returns the unit vector from location_1 to location_2
location_1, location_2: carla.Location objects
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return [x / norm, y / norm, z / norm] | 453e35f91984458e0022f7576ef17902063ee1ed | 11,944 |
def getOpenOCO(recvWindow=""):
"""# Query Open OCO (USER_DATA)
#### `GET /api/v3/openOrderList (HMAC SHA256)`
### Weight: 3
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
    recvWindow |LONG |NO |The value cannot be greater than 60000
    timestamp |LONG |YES |
    Data Source: """
endpoint = '/api/v3/openOrderList'
params = {
}
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params) | ea82f7441f57efd5ebf99ad4cff370f68cbcb367 | 11,946 |
def pixel_unshuffle(scale):
""" Pixel unshuffle.
Args:
x (Tensor): Input feature with shape (b, c, hh, hw).
scale (int): Downsample ratio.
Returns:
Tensor: the pixel unshuffled feature.
"""
if scale == 1:
return lambda x: x
def f(x):
b, c, hh, hw = x.size()
out_channel = c * (scale**2)
assert hh % scale == 0 and hw % scale == 0
h = hh // scale
w = hw // scale
x_view = x.view(b, c, h, scale, w, scale)
return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
return f | 28802c2014e5a3c28de6b751c40ad4787de8e80c | 11,947 |
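A shape check for the helper above (added for illustration; requires PyTorch):
import torch

x = torch.randn(2, 3, 8, 8)
unshuffle = pixel_unshuffle(2)
print(unshuffle(x).shape)  # torch.Size([2, 12, 4, 4])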
import attr
def flag(name, thing=None):
"""Generate an Attribute with that name which is valued True or False."""
if thing is None:
thing = Keyword(name)
return attr(name, thing, "Flag") | 02f32444eb927dd61f0e08da7579e47d1b4a580d | 11,948 |
def select_id_from_scores_dic(id1, id2, sc_dic,
get_worse=False,
rev_filter=False):
"""
Based on ID to score mapping, return better (or worse) scoring ID.
>>> id1 = "id1"
>>> id2 = "id2"
>>> id3 = "id3"
>>> sc_dic = {'id1' : 5, 'id2': 3, 'id3': 3}
>>> select_id_from_scores_dic(id1, id2, sc_dic)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, get_worse=True)
'id2'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True, get_worse=True)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True)
'id2'
>>> select_id_from_scores_dic(id2, id3, sc_dic)
False
"""
sc_id1 = sc_dic[id1]
sc_id2 = sc_dic[id2]
if sc_id1 > sc_id2:
if rev_filter:
if get_worse:
return id1
else:
return id2
else:
if get_worse:
return id2
else:
return id1
elif sc_id1 < sc_id2:
if rev_filter:
if get_worse:
return id2
else:
return id1
else:
if get_worse:
return id1
else:
return id2
else:
return False | f2fa5f33eead47288c92715ce358581a72f18361 | 11,949 |
def add_args(parser):
"""Add arguments to the argparse.ArgumentParser
Args:
parser: argparse.ArgumentParser
Returns:
parser: a parser added with args
"""
# Training settings
parser.add_argument(
"--task",
type=str,
default="train",
metavar="T",
help="the type of task: train or denoise",
)
parser.add_argument(
"--datadir",
type=str,
metavar="DD",
help="data directory for training",
)
parser.add_argument(
"--noisy_wav",
type=str,
metavar="NW",
help="path to noisy wav",
)
parser.add_argument(
"--denoised_wav",
type=str,
default="denoised_sample.wav",
metavar="DW",
help="path to denoised wav",
)
parser.add_argument(
"--pretrained",
type=str,
default=None,
metavar="PT",
help="path to pre-trainedmodel",
)
parser.add_argument(
"--saved_model_path",
type=str,
default="model.pth",
metavar="SMP",
help="path to trained model",
)
parser.add_argument(
"--partition_ratio",
type=float,
default=1 / 3,
metavar="PR",
help="partition ratio for trainig (default: 1/3)",
)
parser.add_argument(
"--batch_size",
type=int,
default=5,
metavar="BS",
help="input batch size for training (default: 5)",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.3)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="momentum (default: 0.9)",
)
parser.add_argument(
"--noise_amp",
type=float,
default=0.01,
metavar="NA",
help="amplitude of added noise for trainign (default: 0.01)",
)
parser.add_argument(
"--split_sec",
type=float,
default=1.0,
metavar="SS",
help="interval for splitting [sec]",
)
parser.add_argument(
"--epochs",
type=int,
default=5,
metavar="EP",
help="how many epochs will be trained",
)
parser.add_argument(
"--sampling_rate",
type=int,
default=16000,
metavar="SR",
help="sampling rate",
)
parser.add_argument(
"--log_interval",
type=int,
default=2,
metavar="LI",
help="log interval",
)
parser.add_argument(
"--path_to_loss",
type=str,
default=None,
metavar="PL",
help="path to png filw which shows the transtion of loss",
)
return parser | cfebbfb6e9821290efdc96aaf0f7a7470e927c70 | 11,950 |
from typing import Any
def run_interactive(package: str, action: str, *args: Any, **_kwargs: Any) -> Any:
"""Call the given action's run"""
action_cls = get(package, action)
app, interaction = args
return action_cls(app.args).run(app=app, interaction=interaction) | 5423d67b61441aaea2162042feeffe68e1d79a0c | 11,951 |
from typing import Tuple
def pythagorean_heuristic(start_point: Tuple[int, int], end_point: Tuple[int, int]) -> float:
"""Return the distance between start_point and end_point using the pythagorean distance
"""
x1, y1 = start_point
x2, y2 = end_point
distance = (((x2 - x1) ** 2) + ((y2 - y1) ** 2)) ** 0.5
return distance | b22369d3860cb0969d43c5e7eb0290a757f5c692 | 11,952 |
def run_simulation(x, simulation_time, dt, rates, sampling_time):
"""
Runs a simulation and stores the sampled sequences the matrix sequences (nb_nucleotide * nb_sequences).
x is modified during the simulation. The original sequence is included in the sequences matrix, in the first row.
"""
ii = 0
time = np.arange(0, simulation_time + 1, dt)
nb_samples = simulation_time // sampling_time
sequences = np.zeros(shape=(len(x), nb_samples + 1), dtype=bool)
for t in time:
if (t % sampling_time == 0):
sequences[:, ii] = x
ii += 1
x = simulation_step(x, dt, rates)
return sequences | b04e36fba421e9931a78f42502d69d5432b5add9 | 11,953 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password")
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | db06fe7b5f99be9ca0957c769f783ec182cc7e24 | 11,954 |
def process_item(item_soup):
"""Parse information about a single podcast episode.
@param item_soup: Soup containing information about a single podcast
episode.
@type item_soup: bs4.BeautifulSoup
@return: Dictionary describing the episode. Contains keys name (str value),
date (datetime.date), loc (url - str value), duration (seconds - int),
and orig_tags (tags applied to episode - list of str)
@rtype: dict
"""
title = item_soup.find('title').contents[0].strip()
loc = item_soup.find('guid').contents[0]
pub_date_raw = item_soup.find('pubdate').contents[0]
pub_date = common.interpret_2822_date(pub_date_raw)
    tags = [
        category.contents[0]
        for category in item_soup.findAll('category')
    ]
duration_soup = item_soup.find('itunes:duration')
    if duration_soup is None:
duration = 1800 if 'shorts' in tags else 3600
else:
duration_str = duration_soup.contents[0]
duration = common.interpret_duration(duration_str)
return {
'name': title,
'date': pub_date,
'tags': sorted(set(tags)),
'loc': loc,
'duration': duration
} | f83c98516d1a573c11c6affbe04122f6eac32918 | 11,955 |
def update_webhook(request, log, tenantId, groupId, policyId, webhookId, data):
"""
Update a particular webhook.
    A webhook may (but need not) include some arbitrary metadata, and must
    include a name.
If successful, no response body will be returned.
Example request::
{
"name": "alice",
"metadata": {
"notes": "this is for Alice"
}
}
"""
rec = get_store().get_scaling_group(log, tenantId, groupId)
deferred = rec.update_webhook(policyId, webhookId, data)
return deferred | ee33bc3a63ebe13c920288ba56a0771a10d8e2c4 | 11,956 |
def covariance_from_internal(internal_values, constr):
"""Undo a cholesky reparametrization."""
chol = chol_params_to_lower_triangular_matrix(internal_values)
cov = chol @ chol.T
return cov[np.tril_indices(len(chol))] | 04d6385f32c8d89e283be89d4f17e7fa5770115e | 11,957 |
def find_result_node(desc, xml_tree):
"""
Returns the <result> node with a <desc> child matching the given text.
Eg: if desc = "text to match", this function will find the following
result node:
<result>
<desc>text to match</desc>
</result>
Parameters
-----
    xml_tree : the xml tree to search for the <result> node
desc : the text contained in the desc node
Returns
-----
node : the <result> node containing the child with the given desc
"""
result_nodes = xml_tree.findall("result")
for result_node in result_nodes:
result_desc = result_node.find("desc").text.strip()
if result_desc == desc:
return result_node
return None | 44ecfae4cd80a04e656bffbcdfbcf686c1e825f2 | 11,958 |
def y(instance):
"""Syntactic sugar to find all y-coordinates of a given class instance.
    Convenience function to return all associated y-coordinates
of a given class instance.
Parameters
----------
instance : DataContainer, Mesh, R3Vector, np.array, list(RVector3)
Return the associated coordinate positions for the given class instance.
"""
return __getCoords('y', 1, instance) | aa0362148bd65427ac27f0e0e875a1cab0fd3057 | 11,959 |
import numpy
def interp_xzplane(y, u, y_target=0.0):
"""Perform linear interpolation of the 3D data at given y-location.
Parameters
----------
y : numpy.ndarray of floats
The y-coordinates along a vertical gridline as a 1D array.
u : numpy.ndarray of floats
The 3D data.
y_target : float (optional)
The y-coordinate at which to interpolate the data.
Returns
-------
u_target : numpy.ndarray of floats
The 2D interpolated data.
"""
idx = numpy.where(y >= y_target)[0][0]
y0, y1 = y[idx - 1], y[idx]
u0, u1 = u[:, idx - 1, :], u[:, idx, :]
u_target = u0 + (y_target - y0) * (u1 - u0) / (y1 - y0)
return u_target | 77f8b559c64eb2b33723a2a8e540f4d783364c84 | 11,960 |
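A small worked example (not from the original) interpolating halfway between two y-planes:
y = numpy.array([0.0, 1.0])
u = numpy.stack([numpy.zeros((2, 2)), numpy.ones((2, 2))], axis=1)  # shape (2, 2, 2)
print(interp_xzplane(y, u, y_target=0.5))
# [[0.5 0.5]
#  [0.5 0.5]]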
def liste_vers_paires(l):
"""
    Convert a structure from list(list(str)) into list([str, str]) pairs
:param l:
:return:
"""
res = []
for i in l:
taille_i = len(i)
for j in range(taille_i-1):
for k in range(j+1, taille_i):
res.append([i[j], i[k]])
return res | 5f40e032fb9aba22656565d958ccfac828512b77 | 11,962 |
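Illustrative call (added) showing every unordered pair within each inner list:
print(liste_vers_paires([["a", "b", "c"], ["x", "y"]]))
# [['a', 'b'], ['a', 'c'], ['b', 'c'], ['x', 'y']]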
from typing import List
from typing import Dict
from typing import Any
def assert_typing(
input_text_word_predictions: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
this is only to ensure correct typing, it does not actually change anything
Args:
input_text_word_predictions: e.g. [
{"char_start": 0, "char_end": 7, "token": "example", "tag": "O"},
..
]
Returns:
input_text_word_predictions_str: e.g. [
{"char_start": "0", "char_end": "7", "token": "example", "tag": "O"},
..
]
"""
return [
{k: str(v) for k, v in input_text_word_prediction.items()}
for input_text_word_prediction in input_text_word_predictions
] | 0835bad510241eeb2ee1f69ac8abeca711ebbf53 | 11,963 |
import urllib
import time
def download(file):
"""Download files from live server, delete recerds of those that 404.
"""
url = 'https://www.' + settings.DOMAIN.partition('.')[2] + file.url()
try:
print(url)
return urllib.request.urlopen(url, timeout=15).read()
except urllib.error.HTTPError as e:
print(e.code, url)
file.delete()
time.sleep(.5)
except urllib.error.URLError as e:
print(e.args, url)
return '' | f1eb0bc35f3a4afa40b22e9ff4db69740b273d31 | 11,964 |
from corehq.apps.commtrack.models import StockState
def get_current_ledger_state(case_ids, ensure_form_id=False):
"""
Given a list of cases returns a dict of all current ledger data of the following format:
{
"case_id": {
"section_id": {
"product_id": StockState,
"product_id": StockState,
...
},
...
},
...
}
:param ensure_form_id: Set to True to make sure return StockState
have the ``last_modified_form_id`` field populated
"""
if not case_ids:
return {}
states = StockState.objects.filter(
case_id__in=case_ids
)
ret = {case_id: {} for case_id in case_ids}
for state in states:
sections = ret[state.case_id].setdefault(state.section_id, {})
sections[state.product_id] = state
if ensure_form_id and not state.last_modified_form_id:
transaction = StockTransaction.latest(state.case_id, state.section_id, state.product_id)
if transaction is not None:
state.last_modified_form_id = transaction.report.form_id
state.save()
return ret | 807cd430a29c7a8c377ad1822435a344d95daa7c | 11,965 |
def _FlattenPadding(padding):
"""Returns padding reduced to have only the time dimension."""
if padding is None:
return padding
r = tf.rank(padding)
return tf.reduce_min(padding, axis=tf.range(1, r)) | 0a757e3bb84ec89c0959de8a1d06667373501c9d | 11,966 |
def revive_custom_object(identifier, metadata):
"""Revives object from SavedModel."""
if ops.executing_eagerly_outside_functions():
model_class = training_lib.Model
else:
model_class = training_lib_v1.Model
revived_classes = {
'_tf_keras_layer': (RevivedLayer, base_layer.Layer),
'_tf_keras_input_layer': (RevivedInputLayer, input_layer.InputLayer),
'_tf_keras_network': (RevivedNetwork, network_lib.Network),
'_tf_keras_model': (RevivedNetwork, model_class),
'_tf_keras_sequential': (RevivedNetwork, models_lib.Sequential)
}
parent_classes = revived_classes.get(identifier, None)
if parent_classes is not None:
parent_classes = revived_classes[identifier]
revived_cls = type(
compat.as_str(metadata['class_name']), parent_classes, {})
return revived_cls._init_from_metadata(metadata) # pylint: disable=protected-access | 870db96ed17fdfe1dc535a3b38541de5a0f34688 | 11,967 |
def all_users():
"""Returns all users in database sorted by name
Returns:
QuerySet[User]: List containing each User instance
"""
# Return all unique users in Database.
# sorted by full name
# returns query set. same as python list. Each index in user_list is a user model.
user_list = User.objects.order_by("full_name")
return user_list | f952b7b1134429e9473da339bbb881011c7bb0b8 | 11,968 |
def plugin_func_list(tree):
"""Return a list of expected reports."""
return [EXPECTED_REPORT + (type(plugin_func_list),)] | 236054789507d64f1593ea13b5333b2c7db2a1aa | 11,969 |
import voluptuous as vol
def entity_ids(value):
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid('Entity IDs can not be None')
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value] | 21ab0ca35dc6b727b57e1dcd472de38a81c92d88 | 11,970 |
import math
import numpy as np
from scipy.stats import unitary_group
from qiskit.exceptions import QiskitError  # assumed import paths for the qiskit names used below
from qiskit.quantum_info import Operator
def random_unitary(dim, seed=None):
"""
Return a random dim x dim unitary Operator from the Haar measure.
Args:
dim (int): the dim of the state space.
seed (int): Optional. To set a random seed.
Returns:
Operator: (dim, dim) unitary operator.
Raises:
QiskitError: if dim is not a positive power of 2.
"""
if seed is not None:
np.random.seed(seed)
if dim == 0 or not math.log2(dim).is_integer():
raise QiskitError("Desired unitary dimension not a positive power of 2.")
return Operator(unitary_group.rvs(dim)) | fd0599fe0a03036fee5ae31ea00b9ba0277b035d | 11,971 |
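# Hedged usage sketch for random_unitary above: assumes qiskit and scipy are installed
# and that the imports added above resolve; dim must be a positive power of 2.
U = random_unitary(4, seed=42)
print(U.dim)           # (4, 4)
print(U.is_unitary())  # True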
from collections import deque
import numpy as np
def allc(IL, IR):
"""
Compute the all-chain set (ALLC).
Parameters
----------
IL : ndarray
Left matrix profile indices
IR : ndarray
Right matrix profile indices
Returns
-------
S : list(ndarray)
All-chain set
C : ndarray
Anchored time series chain for the longest chain (also known as the
unanchored chain)
Notes
-----
`DOI: 10.1109/ICDM.2017.79 <https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf>`__
See Table II
Unlike the original paper, we've replaced the while-loop with a more stable
for-loop.
This is the implementation for the all-chain set (ALLC) and the unanchored
chain is simply the longest one among the all-chain set. Both the
all-chain set and unanchored chain are returned.
The all-chain set, S, is returned as a list of unique numpy arrays.
"""
L = np.ones(IL.size, dtype=np.int64)
S = set() # type: ignore
for i in range(IL.size):
if L[i] == 1:
j = i
C = deque([j])
for k in range(IL.size):
if IR[j] == -1 or IL[IR[j]] != j:
break
else:
j = IR[j]
L[j] = -1
L[i] = L[i] + 1
C.append(j)
S.update([tuple(C)])
C = atsc(IL, IR, L.argmax())
S = [np.array(s, dtype=np.int64) for s in S] # type: ignore
return S, C | 4ec01a2f3718430c2965173a48a0d3f8bd84f0e1 | 11,972 |
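# Hedged usage sketch for allc above: IL/IR are the left/right matrix profile index
# columns produced by stumpy.stump; stumpy (and its module-level atsc helper, which
# the snippet above calls) is assumed to be installed.
import numpy as np
import stumpy
ts = np.random.rand(128)
mp = stumpy.stump(ts, m=16)
IL = mp[:, 2].astype(np.int64)
IR = mp[:, 3].astype(np.int64)
S, C = stumpy.allc(IL, IR)  # same signature as the snippet above
print(len(S), C)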
def build_resilient_url(host, port):
"""
Build basic url to resilient instance
:param host: host name
:type host: str
:param port: port
:type port: str|int
:return: base url
:rtype: str
"""
if host.lower().startswith("http"):
return "{0}:{1}".format(host, port)
return "https://{0}:{1}".format(host, port) | 33be03c5417aa41fdc888d856e760107843096e9 | 11,973 |
import numpy as np
def relative_angle(pos1, pos2):
""" Angle between agents. An element (k,i,j) from the output is the angle at kth sample between ith (reference head) and jth (target base).
arg:
pos1: positions of the thoraces for all flies. [time, flies, y/x]
pos2: positions of the heads for all flies. [time, flies, y/x]
returns:
rel_angles: orientation of flies with respect to chamber. [time, flies, flies]
"""
d0 = pos2 - pos1
d1 = pos1[:, np.newaxis, :, :] - pos2[:, :, np.newaxis, :] # all pairwise "distances"
dot = d0[:, :, np.newaxis, 1]*d1[:, :, :, 1] + d0[:, :, np.newaxis, 0]*d1[:, :, :, 0]
det = d0[:, :, np.newaxis, 1]*d1[:, :, :, 0] - d0[:, :, np.newaxis, 0]*d1[:, :, :, 1]
rel_angles = np.arctan2(det, dot)
return rel_angles * 180.0 / np.pi | cceb6e2e2007399b7479e7ec7c7237d554905b62 | 11,974 |
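# Hedged usage sketch for relative_angle above: random thorax/head positions for
# 3 flies over 100 frames; the output is indexed [time, reference fly, target fly].
import numpy as np
pos_thorax = np.random.rand(100, 3, 2)
pos_head = pos_thorax + 0.1 * np.random.rand(100, 3, 2)
angles = relative_angle(pos_thorax, pos_head)
print(angles.shape)  # (100, 3, 3)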
import h5py
def get_shape(dset):
"""
Extract the shape of a (possibly constant) dataset
Parameters:
-----------
dset: an h5py.Dataset or h5py.Group (when constant)
The object whose shape is extracted
Returns:
--------
A tuple corresponding to the shape
"""
# Case of a constant dataset
if isinstance(dset, h5py.Group):
shape = dset.attrs['shape']
# Case of a non-constant dataset
elif isinstance(dset, h5py.Dataset):
shape = dset.shape
return(shape) | fd1f1ed59542cd1a6527d9c45dd64ee4c7b47cb4 | 11,976 |
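# Hedged usage sketch for get_shape above: builds a throwaway in-memory HDF5 file
# (h5py's core driver) purely for illustration.
import numpy as np
import h5py
with h5py.File("tmp.h5", "w", driver="core", backing_store=False) as f:
    dset = f.create_dataset("data", data=np.zeros((4, 3)))
    print(get_shape(dset))  # (4, 3)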
import typing
def _sanitize_bool(val: typing.Any, /) -> bool:
"""Sanitize argument values to boolean."""
if isinstance(val, str):
return val.lower() == 'true'
return bool(val) | b41c52b6e61bcc6ec8b78138f4a5ee58f7284ca3 | 11,979 |
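# Hedged usage sketch for _sanitize_bool above (the positional-only "/" needs Python 3.8+):
print(_sanitize_bool("True"))   # True
print(_sanitize_bool("false"))  # False
print(_sanitize_bool(0))        # False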
def isSameLinkedList(linked_list1, linked_list2):
"""
Check whether two linked lists are the same.
Args:
linked_list1: -
linked_list2: -
"""
    while linked_list1 and linked_list2:
        if linked_list1.val != linked_list2.val:
            return False
        linked_list1, linked_list2 = linked_list1.next, linked_list2.next
    # lists are equal only if both were exhausted at the same time
    return linked_list1 is None and linked_list2 is None | cb41ed64b61f49c97104939fc1b1869e872f8234 | 11,980
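# Hedged usage sketch for isSameLinkedList above: ListNode is a hypothetical minimal
# node class, not part of the original snippet.
class ListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

a = ListNode(1, ListNode(2, ListNode(3)))
b = ListNode(1, ListNode(2, ListNode(3)))
print(isSameLinkedList(a, b))  # True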
from typing import Any
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: SolcastUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
return {
"info": async_redact_data(config_entry.options, TO_REDACT),
"data": async_redact_data(coordinator.data, TO_REDACT),
} | e8a75709612426a70c94ff30d740a6ca1ff53972 | 11,981 |
import numpy as np
def calculate_value_function(transition_costs):
"""Recursively apply the bellman equation from the end to the start. """
state_dim = [tc.shape[0] for tc in transition_costs]
state_dim.append(transition_costs[-1].shape[1])
V = [np.zeros(d) for d in state_dim]
V_ind = [np.zeros(d) for d in state_dim]
for i in range(len(state_dim) - 2, -1, -1):
rhs = transition_costs[i] + V[i + 1]
V[i] = np.min(rhs, axis=1)
V_ind[i] = np.argmin(rhs, axis=1)
return V_ind, V | 14ef732e45581b407d9c19618c7c18b1e9bdbc4e | 11,982 |
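# Hedged usage sketch for calculate_value_function above: a two-stage problem with
# 2 -> 3 -> 2 states and illustrative transition costs.
import numpy as np
costs = [np.array([[1.0, 5.0, 2.0],
                   [2.0, 1.0, 3.0]]),
         np.array([[0.0, 4.0],
                   [1.0, 1.0],
                   [3.0, 0.0]])]
V_ind, V = calculate_value_function(costs)
print(V[0])      # optimal cost-to-go from each initial state -> [1. 2.]
print(V_ind[0])  # best successor state for each initial state -> [0 0]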
def load_config(fname: str) -> JSON_TYPE:
"""Load a YAML file."""
return load_yaml(fname) | 6d4bab8da8853a3ec4ca47afc7d16b1b519343ab | 11,983 |
def read_tree_color_map(filename):
"""Reads a tree colormap from a file"""
infile = util.open_stream(filename)
maps = []
for line in infile:
expr, red, green, blue = line.rstrip().split("\t")
        maps.append([expr, list(map(float, (red, green, blue)))])
name2color = make_expr_mapping(maps)
def leafmap(node):
return name2color(node.name)
return tree_color_map(leafmap) | 82ebbe7b14785a5e766efe40096d94a6867c46b3 | 11,985 |
import numpy as np
def sin_cos_encoding(arr):
""" Encode an array of angle value to correspongding Sines and Cosines, avoiding value jump in 2PI measure like from PI to -PI. """
return np.concatenate((np.sin(arr), np.cos(arr))) | ada587fc811748a01d1769385cced60cb678cf15 | 11,986 |
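# Hedged usage sketch for sin_cos_encoding above: angles on either side of the
# -pi/pi wrap map to nearby (sin, cos) encodings.
import numpy as np
angles = np.array([np.pi - 0.01, -np.pi + 0.01])
print(sin_cos_encoding(angles))  # approx [ 0.01 -0.01 -1.   -1.  ]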
def atom_free_electrons(mgrph, idx):
""" number of unbound valence electrons for an atom in a molecular graph
"""
atms = atoms(mgrph)
vlnc = valence(atms[idx])
bcnt = atom_bond_count(mgrph, idx)
return vlnc - bcnt | 41989063c18f5f9d30da165528a2969fd728f4eb | 11,987 |
def identify_guest():
"""Returns with an App Engine user or an anonymous user.
"""
app_engine_user = users.get_current_user()
if app_engine_user:
return Guest.app_engine_user(app_engine_user)
ip_address = ip_address_from_request(request)
if ip_address:
return Guest.ip_address(ip_address)
else:
return Guest() | 5bb857a9477e6f7d22f3c675fc2db92935088121 | 11,988 |
def compute_placevalues(tokens):
"""Compute the placevalues for each token in the list tokens"""
pvs = []
for tok in tokens:
if tok == "point":
pvs.append(0)
else:
pvs.append(placevalue(get_value(tok)[0]))
return pvs | af67660675c3d8f55a621a300c530975bffe87ac | 11,989 |
import numpy as np
def get_model_damping(global_step, damping_init, decay_rate, total_epochs, steps_per_epoch):
"""get_model_damping"""
damping_each_step = []
total_steps = steps_per_epoch * total_epochs
for step in range(total_steps):
epoch = (step + 1) / steps_per_epoch
damping_here = damping_init * (decay_rate ** (epoch / 10))
damping_each_step.append(damping_here)
current_step = global_step
damping_each_step = np.array(damping_each_step).astype(np.float32)
damping_now = damping_each_step[current_step:]
return damping_now | 9aeb0fff36b458886c7b38a3f0072927d2660e47 | 11,990 |
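# Hedged usage sketch for get_model_damping above: a schedule that decays damping_init
# by decay_rate every 10 epochs, starting from global_step; the numbers are illustrative.
schedule = get_model_damping(global_step=0, damping_init=0.03, decay_rate=0.87,
                             total_epochs=5, steps_per_epoch=10)
print(schedule.shape)             # (50,)
print(schedule[0], schedule[-1])  # roughly 0.030 ... 0.028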
import numpy as np
def transformData(Z,Time,Spec):
# transformData Transforms each data series based on Spec.Transformation
#
# Input Arguments:
#
# Z : T x N numeric array, raw (untransformed) observed data
# Spec : structure , model specification
#
# Output Arguments:
#
# X : T x N numeric array, transformed data (stationary to enter DFM)
"""
Transformation notes:
'lin' = Levels (No Transformation)
'chg' = Change (Difference)
'ch1' = Year over Year Change (Difference)
'pch' = Percent Change
'pc1' = Year over Year Percent Change
'pca' = Percent Change (Annual Rate)
'log' = Natural Log
"""
T,N = Z.shape
X = np.empty((T, N))
X[:] = np.nan
Freq_dict = {"m":1,"q":3}
formula_dict = {"lin":lambda x:x*2,
"chg":lambda x:np.append(np.nan,x[t1+step::step] - x[t1:-1-t1:step]),
"ch1":lambda x:x[12+t1::step] - x[t1:-12:step],
"pch":lambda x:(np.append(np.nan,x[t1+step::step]/x[t1:-1-t1:step]) - 1)*100,
"pc1":lambda x:((x[12+t1::step]/x[t1:-12:step])-1)*100,
"pca":lambda x:(np.append(np.nan,x[t1+step::step]/x[t1:-step:step])**(1/n) - 1)*100,
"log":lambda x:np.log(x)
}
for i in range(N):
formula = Spec.Transformation[i]
freq = Spec.Frequency[i]
step = Freq_dict[freq] # time step for different frequencies based on monthly time
t1 = step -1 # assume monthly observations start at beginning of quarter (subtracted 1 for indexing)
n = step/12 # number of years, needed to compute annual % changes
series = Spec.SeriesName[i]
if formula == 'lin':
X[:,i] = Z[:,i].copy()
elif formula == 'chg':
X[t1::step,i] = formula_dict['chg'](Z[:,i].copy())
elif formula == 'ch1':
X[12+t1::step, i] = formula_dict['ch1'](Z[:, i].copy())
elif formula == 'pch':
X[t1::step, i] = formula_dict['pch'](Z[:, i].copy())
elif formula == 'pc1':
X[12+t1::step, i] = formula_dict['pc1'](Z[:, i].copy())
elif formula == 'pca':
X[t1::step, i] = formula_dict['pca'](Z[:, i].copy())
elif formula == 'log':
X[:, i] = formula_dict['log'](Z[:, i].copy())
else:
            raise ValueError("{}: Transformation is unknown".format(formula))
# Drop first quarter of observations
# since transformations cause missing values
return X[3:,:],Time[3:],Z[3:,:] | a56b6ecd4ae408cbc9e3873ba10833bd902c3249 | 11,991 |
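# Hedged usage sketch for transformData above: Spec is mocked with SimpleNamespace and
# only the fields the function reads are provided; the data values are illustrative.
import numpy as np
from types import SimpleNamespace
T = 15
Z = np.cumsum(np.random.rand(T, 2), axis=0) + 10.0
Time = np.arange(T)
Spec = SimpleNamespace(Transformation=["pch", "log"], Frequency=["m", "m"],
                       SeriesName=["series_a", "series_b"])
X, Time_out, Z_out = transformData(Z, Time, Spec)
print(X.shape)  # (12, 2) -- the first quarter of observations is dropped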
def torch_fn():
"""Create a ReLU layer in torch."""
return ReLU() | 35ae9fe99a641768f109b8ba216271730941892e | 11,992 |
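# Hedged usage sketch for torch_fn above (assumes torch is installed):
import torch
layer = torch_fn()
print(layer(torch.tensor([-1.0, 0.5])))  # tensor([0.0000, 0.5000])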
def all_user_tickets(uid, conference):
"""
    Cache-friendly version of user_tickets; returns a list of
        (ticket_id, fare_type, fare_code, complete)
    for every ticket associated with the user.
"""
qs = _user_ticket(User.objects.get(id=uid), conference)
output = []
for t in qs:
output.append((
t.id, t.fare.ticket_type, t.fare.code,
_ticket_complete(t)
))
return output | 7778d4fa0eac0c311db8965f8bd449ad31bd49db | 11,993 |
from typing import List
from torch import nn  # assumed; nn.Module and the HardConcrete layers suggest PyTorch
def get_hardconcrete_linear_modules(module: nn.Module) -> List[nn.Module]:
"""Get all HardConcrete*Linear modules.
Parameters
----------
module : nn.Module
The input module
Returns
-------
List[nn.Module]
A list of the HardConcrete*Linear module.
"""
modules = []
for m in module.children():
if isinstance(m, HardConcreteProjectedLinear):
modules.append(m)
elif isinstance(m, HardConcreteLinear):
modules.append(m)
else:
modules.extend(get_hardconcrete_linear_modules(m))
return modules | bc821bb2fc41dbf7385b6e319323a65e0372e218 | 11,994 |
import torch
def approx_partial(model, ori_target, param, current_val, params, loss_list, information_loss_list, xs_list, ys_list, train=False, optimizer=None):
"""Compute the approximate partial derivative using the finite-difference method.
:param param:
:param current_val:
:param params:
:return:
"""
#step_size = STEP_SIZES[param]
step_size = 10
losses = []
for sign in [-1, 1]:
set_param(param, current_val + sign * step_size / 2, params)
loss = get_attack_loss(model, ori_target, information_loss_list, xs_list, ys_list,
loss_f=torch.nn.MSELoss(reduction='none'),
xs=params['x'], ys=params['y'],
shape=(320, 320), n_pixel_range=(10, 11), train=train, optimizer=optimizer)
# image = RENDERER.render()
# with torch.no_grad():
# out = MODEL(image)
# loss = CRITERION(out, LABELS).item()
losses.append(loss)
grad = (losses[1] - losses[0]) / step_size
loss_list += losses
return grad | 5cf3eff7880d2405a1dfa01a4296a974deeef70d | 11,995 |
import time
def check_successful_connections(_ctx: Context) -> bool:
"""Checks if there are no successful connections more than SUCCESSFUL_CONNECTIONS_CHECK_PERIOD sec.
Returns True if there was successful connection for last NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC sec.
:parameter _ctx: Context
"""
now_ns = time.time_ns()
lower_bound = max(_ctx.get_start_time_ns(),
_ctx.Statistic.connect.last_check_time)
diff_sec = ns2s(now_ns - lower_bound)
if _ctx.Statistic.connect.success == _ctx.Statistic.connect.success_prev:
if diff_sec > SUCCESSFUL_CONNECTIONS_CHECK_PERIOD_SEC:
_ctx.add_error(Errors('Check connection', no_successful_connections_error_msg(_ctx)))
return diff_sec <= NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC
else:
_ctx.Statistic.connect.last_check_time = now_ns
_ctx.Statistic.connect.sync_success()
_ctx.remove_error(Errors('Check connection', no_successful_connections_error_msg(_ctx)).uuid)
return True | 19a3b9ee66ad8a3a6c2b4677116616ccdd5a452b | 11,996 |
def float_or_none(string):
""" Returns float number iff string represents one, else return None. TESTS OK 2020-10-24. """
try:
return float(string)
except (ValueError, TypeError):
return None | 8cc4437841f67e5b2f884ca566f3e6870dcd7649 | 11,997 |
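# Hedged usage sketch for float_or_none above:
print(float_or_none("3.14"))  # 3.14
print(float_or_none("abc"))   # None
print(float_or_none(None))    # None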