content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
from typing import List
def _hostnames() -> List[str]:
"""Returns all host names from the ansible inventory."""
return sorted(_ANSIBLE_RUNNER.get_hosts()) | 8c1ed3f61887ff637d9a9091a20cc3f9e4144dde | 12,300 |
def seabass_to_pandas(path):
"""SeaBASS to Pandas DataFrame converter
Parameters
----------
path : str
path to an FCHECKed SeaBASS file
Returns
-------
pandas.DataFrame
"""
sb = readSB(path)
dataframe = pd.DataFrame.from_dict(sb.data)
return dataframe | 7988da0adb19e59d7c898658d2fe659b2d145606 | 12,301 |
def countVisits(item, value=None):
"""This function takes a pandas.Series of item tags, and an optional string for a specific tag
and returns a numpy.ndarray of the same size as the input, which contains either
1) a running count of unique transitions of item, if no target tag is given, or
    2) a running count of the number of entries into a run of the target tag
:param item: a pandas Series of labels of events
:param value: optional value of the item to keep track of
:return: a running count of the unique values of items if value==None, or a running count of the specific value
"""
# make sure item is a 1-D np array or a Pandas Series
# if not isinstance(item, (pd.core.series.Series, np.ndarray) ):
assert (isinstance(item, pd.core.series.Series))
# create counter; this saves time, apparently
    count = np.zeros(item.size, dtype=int)  # np.int was removed from NumPy; the builtin int is equivalent
if value is None:
# not specified, then we track any time item changes value
count[np.where(item != item.shift())] = 1
else:
# only when item==value
count[np.where(np.logical_and(item != item.shift(), item == value))] = 1
return count.cumsum() | dbb677cc356d867d7f861fe18e2d5c653598d20c | 12,302 |
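A minimal usage sketch for countVisits above (illustrative data; the snippet itself assumes numpy/pandas imported as np/pd):

```python
import pandas as pd

# Event tags: two runs of 'a' separated by one run of 'b'.
tags = pd.Series(['a', 'a', 'b', 'a', 'a'])

print(countVisits(tags))        # [1 1 2 3 3] -- increments on every tag change
print(countVisits(tags, 'a'))   # [1 1 1 2 2] -- increments only on entries into runs of 'a'
```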
import numpy as np
import torch
from pathlib import Path
import pytest
def test_run_inference(ml_runner_with_container: MLRunner, tmp_path: Path) -> None:
"""
Test that run_inference gets called as expected.
"""
def _expected_files_exist() -> bool:
output_dir = ml_runner_with_container.container.outputs_folder
if not output_dir.is_dir():
return False
expected_files = ["test_mse.txt", "test_mae.txt"]
return all([(output_dir / p).exists() for p in expected_files])
# create the test data
N = 100
x = torch.rand((N, 1)) * 10
y = 0.2 * x + 0.1 * torch.randn(x.size())
xy = torch.cat((x, y), dim=1)
data_path = tmp_path / "hellocontainer.csv"
np.savetxt(data_path, xy.numpy(), delimiter=",")
expected_ckpt_path = ml_runner_with_container.container.outputs_folder / "checkpoints" / "last.ckpt"
assert not expected_ckpt_path.exists()
# update the container to look for test data at this location
ml_runner_with_container.container.local_dataset_dir = tmp_path
assert not _expected_files_exist()
actual_train_ckpt_path = ml_runner_with_container.checkpoint_handler.get_recovery_or_checkpoint_path_train()
assert actual_train_ckpt_path is None
ml_runner_with_container.run()
actual_train_ckpt_path = ml_runner_with_container.checkpoint_handler.get_recovery_or_checkpoint_path_train()
assert actual_train_ckpt_path == expected_ckpt_path
actual_test_ckpt_path = ml_runner_with_container.checkpoint_handler.get_checkpoints_to_test()
assert actual_test_ckpt_path == [expected_ckpt_path]
assert actual_test_ckpt_path[0].exists()
# After training, the outputs directory should now exist and contain the 2 error files
assert _expected_files_exist()
# if no checkpoint handler, no checkpoint paths will be saved and these are required for
# inference so ValueError will be raised
with pytest.raises(ValueError) as e:
ml_runner_with_container.checkpoint_handler = None # type: ignore
ml_runner_with_container.run()
assert "expects exactly 1 checkpoint for inference, but got 0" in str(e) | dc38c5582f8d69ff53f24c34d403ed3f14f964f9 | 12,303 |
def gen_accel_table(table_def):
"""generate an acceleration table"""
table = []
for i in range(1001):
table.append(0)
for limit_def in table_def:
range_start, range_end, limit = limit_def
for i in range(range_start, range_end + 1):
table[i] = limit
return table | 53d96db86068d893dfbb216e9e1283535cad9412 | 12,304 |
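A quick illustrative call to gen_accel_table above (the table_def ranges and limits here are made up):

```python
# Each entry is (range_start, range_end, limit); indices outside every range stay 0.
table_def = [(0, 500, 10), (501, 1000, 25)]

table = gen_accel_table(table_def)
print(len(table))                                     # 1001
print(table[0], table[500], table[501], table[1000])  # 10 10 25 25
```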
from typing import Tuple
import torch
def dataset_constructor(
config: ml_collections.ConfigDict,
) -> Tuple[
torch.utils.data.Dataset, torch.utils.data.Dataset, torch.utils.data.Dataset
]:
"""
    Create the train / validation / test splits for the dataset selected in config.dataset
:return: Tuple (training_set, validation_set, test_set)
"""
dataset = {
"AddProblem": AdditionProblem,
"CopyMemory": CopyMemory,
"MNIST": MNIST,
"CIFAR10": CIFAR10,
"SpeechCommands": SpeechCommands,
"CharTrajectories": CharTrajectories,
}[config.dataset]
training_set = dataset(
partition="train",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train,
dropped_rate=config.drop_rate,
)
test_set = dataset(
partition="test",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train
if config.sr_test == 0
        else config.sr_test,  # The test set can be sampled differently.
dropped_rate=config.drop_rate,
)
if config.dataset in ["SpeechCommands", "CharTrajectories"]:
validation_set = dataset(
partition="val",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train,
dropped_rate=config.drop_rate,
)
else:
validation_set = None
return training_set, validation_set, test_set | 175e45640e85df7f76331dc99b60d73fccbbdc43 | 12,305 |
def ensemble_log_params(m, params, hess=None,
steps=scipy.inf, max_run_hours=scipy.inf,
temperature=1.0, step_scale=1.0,
sing_val_cutoff=0, seeds=None,
recalc_hess_alg = False, recalc_func=None,
save_hours=scipy.inf, save_to=None,
skip_elems = 0, log_params=True,
save_scalefactors=False):
"""
Generate a Bayesian ensemble of parameter sets consistent with the data in
the model. The sampling is done in terms of the logarithm of the parameters.
Inputs:
m -- Model to generate the ensemble for
params -- Initial parameter KeyedList to start from
hess -- Hessian of the model
steps -- Maximum number of Monte Carlo steps to attempt
max_run_hours -- Maximum number of hours to run
temperature -- Temperature of the ensemble
step_scale -- Additional scale applied to each step taken. step_scale < 1
results in steps shorter than those dictated by the quadratic
approximation and may be useful if acceptance is low.
sing_val_cutoff -- Truncate the quadratic approximation at eigenvalues
smaller than this fraction of the largest.
seeds -- A tuple of two integers to seed the random number generator
recalc_hess_alg --- If True, the Monte-Carlo is done by recalculating the
                        hessian matrix every timestep. This significantly
increases the computation requirements for each step,
but it may be worth it if it improves convergence.
recalc_func --- Function used to calculate the hessian matrix. It should
take only a log parameters argument and return the matrix.
If this is None, default is to use
                    m.GetJandJtJInLogParameters
save_hours --- If save_to is not None, the ensemble will be saved to
that file every 'save_hours' hours.
save_to --- Filename to save ensemble to.
skip_elems --- If non-zero, skip_elems are skipped between each included
step in the returned ensemble. For example, skip_elems=1
will return every other member. Using this option can
reduce memory consumption.
save_scalefactors --- If True, scale factors will be saved during
integration.
Outputs:
ens, ens_fes, ratio, [scale_factors]
ens -- List of KeyedList parameter sets in the ensemble
ens_fes -- List of free energies for each parameter set
ratio -- Fraction of attempted moves that were accepted
scale_factors -- List of scale factors throughout ensemble, only returned
if save_scalefactors is True.
    The sampling is done by Markov Chain Monte Carlo, with a Metropolis-Hastings
    update scheme. The candidate-generating density is a Gaussian centered on the
current point, with axes determined by the hessian. For a useful
introduction see:
Chib and Greenberg. "Understanding the Metropolis-Hastings Algorithm"
_The_American_Statistician_ 49(4), 327-335
"""
if scipy.isinf(steps) and scipy.isinf(max_run_hours):
logger.warn('Both steps and max_run_hours are infinite! '
'Code will not stop by itself!')
if seeds is None:
seeds = int(time.time()%1 * 1e6)
logger.debug('Seeding random number generator based on system time.')
logger.debug('Seed used: %s' % str(seeds))
scipy.random.seed(seeds)
if isinstance(params, KeyedList):
param_keys = params.keys()
curr_params = copy.deepcopy(params)
curr_F = m.free_energy(curr_params, temperature)
ens, ens_Fs = [curr_params], [curr_F]
curr_sf = m.internalVars['scaleFactors'].copy()
ens_scale_factors = [curr_sf]
# We work with arrays of params through the rest of the code
curr_params = scipy.array(curr_params)
    if recalc_func is None:
        # Only fill in a default hessian function when none was supplied (as documented above).
        if log_params:
            recalc_func = lambda p: m.GetJandJtJInLogParameters(scipy.log(p))[1]
        else:
            recalc_func = lambda p: m.GetJandJtJ(p)[1]
accepted_moves, attempt_exceptions, ratio = 0, 0, scipy.nan
start_time = last_save_time = time.time()
# Calculate our first hessian if necessary
if hess is None:
hess = recalc_func(curr_params)
# Generate the sampling matrix used to generate candidate moves
samp_mat = _sampling_matrix(hess, sing_val_cutoff, temperature, step_scale)
steps_attempted = 0
while steps_attempted < steps:
# Have we run too long?
if (time.time() - start_time) >= max_run_hours*3600:
break
# Generate the trial move from the quadratic approximation
deltaParams = _trial_move(samp_mat)
# Scale the trial move by the step_scale and the temperature
#scaled_step = step_scale * scipy.sqrt(temperature) * deltaParams
scaled_step = deltaParams
if log_params:
next_params = curr_params * scipy.exp(scaled_step)
else:
next_params = curr_params + scaled_step
try:
next_F = m.free_energy(next_params, temperature)
        except Utility.SloppyCellException as X:
logger.warn('SloppyCellException in free energy evaluation at step '
'%i, free energy set to infinity.' % len(ens))
logger.warn('Parameters tried: %s.' % str(next_params))
attempt_exceptions += 1
next_F = scipy.inf
        except Utility.ConstraintViolatedException as X:
logger.warn('ConstraintViolatedException in free energy evaluation '
'at step %i, free energy set to infinity.' % len(ens))
logger.warn('Parameters tried: %s.' % str(next_params))
attempt_exceptions += 1
next_F = scipy.inf
if recalc_hess_alg and not scipy.isinf(next_F):
try:
next_hess = recalc_func(next_params)
next_samp_mat = _sampling_matrix(next_hess, sing_val_cutoff,
temperature, step_scale)
accepted = _accept_move_recalc_alg(curr_F, samp_mat,
next_F, next_samp_mat,
deltaParams, temperature)
            except Utility.SloppyCellException as X:
logger.warn('SloppyCellException in JtJ evaluation at step '
'%i, move not accepted.' % len(ens))
logger.warn('Parameters tried: %s.' % str(next_params))
attempt_exceptions += 1
next_F = scipy.inf
accepted = False
else:
accepted = _accept_move(next_F - curr_F, temperature)
steps_attempted += 1
if accepted:
accepted_moves += 1.
curr_params = next_params
curr_sf = m.internalVars['scaleFactors'].copy()
curr_F = next_F
if recalc_hess_alg:
hess = next_hess
samp_mat = next_samp_mat
if steps_attempted % (skip_elems + 1) == 0:
ens_Fs.append(curr_F)
if save_scalefactors:
ens_scale_factors.append(curr_sf)
if isinstance(params, KeyedList):
ens.append(KeyedList(zip(param_keys, curr_params)))
else:
ens.append(curr_params)
ratio = accepted_moves/steps_attempted
# Save to a file
if save_to is not None\
and time.time() >= last_save_time + save_hours * 3600:
_save_ens(ens, ens_Fs, ratio, save_to, attempt_exceptions,
steps_attempted, ens_scale_factors,
save_sf=save_scalefactors)
last_save_time = time.time()
if save_to is not None:
_save_ens(ens, ens_Fs, ratio, save_to, attempt_exceptions,
steps_attempted, ens_scale_factors,
save_sf=save_scalefactors)
if save_scalefactors:
return ens, ens_Fs, ratio, ens_scale_factors
else:
return ens, ens_Fs, ratio | 2617b297ccb6d347b8a1b6afd8f09af5a1c7db81 | 12,306 |
import os
def issue_config_exists(repo_path):
"""
returns True if the issue template config.yml file exists in the repo_path
"""
path_to_config = repo_path + "/.github/ISSUE_TEMPLATE/config.yml"
return os.path.exists(path_to_config) | 129b5b47304a60a6c10a8740dda1459c816f6ea1 | 12,307 |
import os
def _read(fd):
"""Default read function."""
return os.read(fd, 1024) | 1763d3cceb6d55c257218dbbbb52209ab55de623 | 12,308 |
def get_metrics(actual_classes, pred_classes):
"""
Function to calculate performance metrics for the classifier
For each class, the following is calculated
TP: True positives = samples that were correctly put into the class
TN: True negatives = samples that were correctly not put into the class
    FP: False positives = samples that were incorrectly put into the class
FN: False negatives = samples that should be in the class but were put into
another class
Parameters
----------
pred_classes : neuron types predicted by the classifier
actual_classes : known neuron types
Returns
-------
conf_mat: Confusion matrix = a visual representation of the algorithm's performance
acc: Accuracy = the fraction of correctly classified samples
MK: Markedness = a measure of how trustworthy a classification is,
accounting for both positive and negative classifications.
Value close to 1 means the classifier makes mostly correct predictions, value
close to -1 means the classifier makes mostly wrong predictions.
"""
conf_mat = metrics.confusion_matrix(actual_classes, pred_classes)
acc = metrics.balanced_accuracy_score(actual_classes, pred_classes)
"""
the next portion of code is copied from:
https://towardsdatascience.com/multi-class-classification-extracting-performance-metrics-from-the-confusion-matrix-b379b427a872
"""
FP = conf_mat.sum(axis=0) - np.diag(conf_mat)
FN = conf_mat.sum(axis=1) - np.diag(conf_mat)
TP = np.diag(conf_mat)
TN = conf_mat.sum() - (FP + FN + TP)
FP = np.sum(FP)
FN = np.sum(FN)
TP = np.sum(TP)
TN = np.sum(TN)
"""
end of copied code
"""
MK = (TP/(TP+FP)) + (TN/(TN+FN)) - 1
return conf_mat, acc, MK | 925d80d146ca29984886324338b9f99688c721b8 | 12,309 |
import dateutil
def extract_tika_meta(meta):
"""Extracts and normalizes metadata from Apache Tika.
Returns a dict with the following keys set:
- content-type
- author
- date-created
- date-modified
- original-tika-meta
The dates are encoded in the ISO format."""
def _get_flat(dict, *keys):
item = None
for key in keys:
item = dict.get(key)
if item is not None:
break
if type(item) is list:
return item[0]
return item
def _get_bool(dict, *keys):
item = _get_flat(dict, *keys)
if not item:
return False
if type(item) is bool:
return item
return item.lower() == "true"
data = {
'content-type': _get_flat(meta,
'Content-Type',
'content-type'),
'author': _get_flat(meta,
'Author',
'meta:author',
'creator'),
'date-created': _get_flat(meta,
'Creation-Date',
'dcterms:created',
'meta:created',
'created'),
'date-modified': _get_flat(meta,
'Last-Modified',
'Last-Saved-Date',
'dcterms:modified',
'meta:modified',
'created'),
'original-tika-meta': meta
}
for key in ['date-modified', 'date-created']:
if data.get(key):
data[key] = dateutil.parser.parse(data[key]).isoformat()
return data | d5e73afa3b7747d31f295acb840c3730a3e60ed1 | 12,310 |
def __gen_pause_flow(testbed_config,
src_port_id,
flow_name,
pause_prio_list,
flow_dur_sec):
"""
Generate the configuration for a PFC pause storm
Args:
testbed_config (obj): L2/L3 config of a T0 testbed
src_port_id (int): ID of the source port
        flow_name (str): flow's name
        pause_prio_list (list): priorities to pause for PFC frames
        flow_dur_sec (float): duration of the flow in seconds
Returns:
flow configuration (obj): including name, packet format, rate, ...
"""
pause_time = []
for x in range(8):
if x in pause_prio_list:
pause_time.append('ffff')
else:
pause_time.append('0000')
vector = pfc_class_enable_vector(pause_prio_list)
pause_pkt = Header(PfcPause(
dst=FieldPattern(choice='01:80:C2:00:00:01'),
src=FieldPattern(choice='00:00:fa:ce:fa:ce'),
class_enable_vector=FieldPattern(choice=vector),
pause_class_0=FieldPattern(choice=pause_time[0]),
pause_class_1=FieldPattern(choice=pause_time[1]),
pause_class_2=FieldPattern(choice=pause_time[2]),
pause_class_3=FieldPattern(choice=pause_time[3]),
pause_class_4=FieldPattern(choice=pause_time[4]),
pause_class_5=FieldPattern(choice=pause_time[5]),
pause_class_6=FieldPattern(choice=pause_time[6]),
pause_class_7=FieldPattern(choice=pause_time[7]),
))
dst_port_id = (src_port_id + 1) % len(testbed_config.devices)
pause_src_point = PortTxRx(tx_port_name=testbed_config.ports[src_port_id].name,
rx_port_name=testbed_config.ports[dst_port_id].name)
"""
The minimal fixed time duration in IXIA is 1 second.
To support smaller durations, we need to use # of packets
"""
speed_str = testbed_config.layer1[0].speed
speed_gbps = int(speed_str.split('_')[1])
pause_dur = 65535 * 64 * 8.0 / (speed_gbps * 1e9)
pps = int(2 / pause_dur)
pkt_cnt = pps * flow_dur_sec
pause_flow = Flow(
name=flow_name,
tx_rx=TxRx(pause_src_point),
packet=[pause_pkt],
size=Size(64),
rate=Rate('pps', value=pps),
duration=Duration(FixedPackets(packets=pkt_cnt, delay=0))
)
return pause_flow | 953a6d3a3741b6af0b06bd8165abd7350b838b41 | 12,311 |
def parse_str_to_bio(str, dia_act):
""" parse str to BIO format """
intent = parse_intent(dia_act)
w_arr, bio_arr = parse_slots(str, dia_act)
bio_arr[-1] = intent
return ' '.join(w_arr), ' '.join(bio_arr), intent | 951cd110acd5fa53def9e781c0ab9b545d2931b8 | 12,312 |
def train_early_stop(
update_fn, validation_fn, optimizer, state, max_epochs=1e4, **early_stop_args
):
"""Run update_fn until given validation metric validation_fn increases.
"""
logger = Logger()
check_early_stop = mask_scheduler(**early_stop_args)
for epoch in jnp.arange(max_epochs):
(optimizer, state), metrics, output = update_fn(optimizer, state)
if epoch % 1000 == 0:
print(f"Loss step {epoch}: {metrics['loss']}")
if epoch % 25 == 0:
val_metric = validation_fn(optimizer, state)
stop_training, optimizer = check_early_stop(val_metric, epoch, optimizer)
metrics = {**metrics, "validation_metric": val_metric}
logger.write(metrics, epoch)
if stop_training:
print("Converged.")
break
logger.close()
return optimizer, state | a9dc6e76d2796edacc0f55b06e9cf258a90dffea | 12,313 |
from typing import List
def get_povm_object_names() -> List[str]:
"""Return the list of valid povm-related object names.
Returns
-------
List[str]
the list of valid povm-related object names.
"""
names = ["pure_state_vectors", "matrices", "vectors", "povm"]
return names | cb80899b9b3a4aca4bfa1388c6ec9c61c59978a4 | 12,314 |
def choose(a,b):
""" n Choose r function """
a = op.abs(round(a))
b = op.abs(round(b))
if(b > a):
a, b = b, a
return factorial(a) / (factorial(b) * factorial(a-b)) | 30b70dc950e9f6d501cf5ef07bfed682dce41c43 | 12,315 |
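The choose snippet above references op and factorial without imports; a hedged usage sketch assuming they come from the standard operator and math modules:

```python
import operator as op       # assumed source of op.abs
from math import factorial  # assumed source of factorial

print(choose(5, 2))   # 10.0
print(choose(2, 5))   # 10.0 -- arguments are swapped so the larger value acts as n
print(choose(-5, 2))  # 10.0 -- absolute values are taken first
```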
from typing import List
import torch
from typing import Optional
def pad_and_stack_list_of_tensors(lst_embeddings: List[torch.Tensor], max_sequence_length: Optional[int] = None,
return_sequence_length: bool = False):
"""
    Takes a list of embedding tensors, zero-pads each one along the sequence dimension and stacks them
    into a single tensor of shape (len(lst_embeddings), max_sequence_length, ...).
    @param lst_embeddings: list of tensors whose second-to-last axis is the sequence dimension
    @param max_sequence_length: target length for padding; defaults to the longest sequence in the list
    @param return_sequence_length: if True, also return the list of original sequence lengths
"""
dim = -2 # second last axis. it must be the sequence dimension.
lst_seq_len = [embeddings.shape[dim] for embeddings in lst_embeddings]
if max_sequence_length is None:
max_sequence_length = max(lst_seq_len)
else:
n_max = max(lst_seq_len)
assert max_sequence_length >= n_max, \
f"`max_sequence_length` must be greater or equal to max. embeddings size: {n_max} > {max_sequence_length}"
lst_padded_embeddings = [pad_trailing_tensors(e_t, max_sequence_length) for e_t in lst_embeddings]
stacked_embeddings = torch.stack(lst_padded_embeddings)
if return_sequence_length:
return stacked_embeddings, lst_seq_len
else:
return stacked_embeddings | 78c3a11f7ff79798d9b86703318eabb8da32695a | 12,316 |
from typing import List
def bq_solid_for_queries(sql_queries):
"""
Executes BigQuery SQL queries.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
sql_queries = check.list_param(sql_queries, 'sql queries', of_type=str)
@solid(
input_defs=[InputDefinition(_START, Nothing)],
output_defs=[OutputDefinition(List[DataFrame])],
config_field=define_bigquery_query_config(),
required_resource_keys={'bigquery'},
metadata={'kind': 'sql', 'sql': '\n'.join(sql_queries)},
)
def bq_solid(context): # pylint: disable=unused-argument
query_job_config = _preprocess_config(context.solid_config.get('query_job_config', {}))
# Retrieve results as pandas DataFrames
results = []
for sql_query in sql_queries:
# We need to construct a new QueryJobConfig for each query.
# See: https://bit.ly/2VjD6sl
cfg = QueryJobConfig(**query_job_config) if query_job_config else None
context.log.info(
'executing query %s with config: %s'
% (sql_query, cfg.to_api_repr() if cfg else '(no config provided)')
)
results.append(
context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()
)
return results
return bq_solid | 0b7d71d6ec6aca87a581c8e0876bc1d88e7242c8 | 12,317 |
def mock_accession_unreplicated(
mocker: MockerFixture,
mock_accession_gc_backend,
mock_metadata,
lab: str,
award: str,
) -> Accession:
"""
Mocked accession instance with dummy __init__ that doesn't do anything and pre-baked
assembly property. @properties must be patched before instantiation
"""
mocker.patch.object(
Accession,
"experiment",
new_callable=PropertyMock(
return_value=EncodeExperiment(
{
"@id": "foo",
"assay_term_name": "microRNA",
"replicates": [
{"biological_replicate_number": 1, "status": "released"}
],
}
)
),
)
mocked_accession = AccessionMicroRna(
"imaginary_steps.json",
Analysis(mock_metadata, backend=mock_accession_gc_backend),
"mock_server.biz",
EncodeCommonMetadata(lab, award),
Recorder(use_in_memory_db=True),
no_log_file=True,
)
return mocked_accession | c221c34a72809737d22b76beff89a18eece128ff | 12,318 |
from typing import Optional
def get_prepared_statement(statement_name: Optional[str] = None,
work_group: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPreparedStatementResult:
"""
Resource schema for AWS::Athena::PreparedStatement
:param str statement_name: The name of the prepared statement.
:param str work_group: The name of the workgroup to which the prepared statement belongs.
"""
__args__ = dict()
__args__['statementName'] = statement_name
__args__['workGroup'] = work_group
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:athena:getPreparedStatement', __args__, opts=opts, typ=GetPreparedStatementResult).value
return AwaitableGetPreparedStatementResult(
description=__ret__.description,
query_statement=__ret__.query_statement) | e3bcd74b2bc9093a0fff822a4f35b0de4dab3e03 | 12,319 |
def handle_col(element, box, _get_image_from_uri, _base_url):
"""Handle the ``span`` attribute."""
if isinstance(box, boxes.TableColumnBox):
integer_attribute(element, box, 'span')
if box.span > 1:
# Generate multiple boxes
# http://lists.w3.org/Archives/Public/www-style/2011Nov/0293.html
return [box.copy() for _i in range(box.span)]
return [box] | ef9fe04982bbb278df1453104823235ecf23113f | 12,320 |
def get_dotted_field(input_dict: dict, accessor_string: str) -> dict:
"""Gets data from a dictionary using a dotted accessor-string.
Parameters
----------
input_dict : dict
A nested dictionary.
accessor_string : str
The value in the nested dict.
Returns
-------
dict
Data from the dictionary.
"""
current_data = input_dict
for chunk in accessor_string.split("."):
current_data = current_data.get(chunk, {})
return current_data | 2c82c0512384810e77a5fb53c73f67d2055dc98e | 12,321 |
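A small usage sketch for get_dotted_field above (illustrative dictionary):

```python
config = {"server": {"db": {"host": "localhost", "port": 5432}}}

print(get_dotted_field(config, "server.db.host"))  # localhost
print(get_dotted_field(config, "server.cache"))    # {} -- missing keys fall back to an empty dict
```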
import re
def separa_frases(sentenca):
"""[A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca]
Arguments:
sentenca {[str]} -- [recebe uma frase]
Returns:
[lista] -- [lista das frases contidas na sentença]
"""
return re.split(r'[,:;]+', sentenca) | d3ac427172e34054119659adc55295ac27965e6c | 12,322 |
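A usage sketch for separa_frases above (illustrative sentence):

```python
sentence = "I came, I saw; I conquered: then I left"

print(separa_frases(sentence))
# ['I came', ' I saw', ' I conquered', ' then I left']
```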
import pathlib
import json
import importlib
from functools import partial
def read_datasets(path=None, filename="datasets.json"):
"""Read the serialized (JSON) dataset list
"""
if path is None:
path = _MODULE_DIR
else:
path = pathlib.Path(path)
with open(path / filename, 'r') as fr:
ds = json.load(fr)
# make the functions callable
for _, dset_opts in ds.items():
args = dset_opts.get('load_function_args', {})
kwargs = dset_opts.get('load_function_kwargs', {})
fail_func = partial(unknown_function, dset_opts['load_function_name'])
func_mod_name = dset_opts.get('load_function_module', None)
if func_mod_name:
func_mod = importlib.import_module(func_mod_name)
else:
func_mod = _MODULE
func_name = getattr(func_mod, dset_opts['load_function_name'], fail_func)
func = partial(func_name, *args, **kwargs)
dset_opts['load_function'] = func
return ds | ade3b9169d0f1db45d3358f27a54ea634f6d883e | 12,323 |
def as_actor(input, actor) :
"""Takes input and actor, and returns [as
<$actor>]$input[endas]."""
if " " in actor :
repla = "<%s>"%actor
else :
repla = actor
return "[as %s]%s[endas]" % (repla, input) | dc9bd33bd6b2156f4fa353db2a0b01bfa6dd1357 | 12,324 |
def error_403(request):
"""View rendered when encountering a 403 error."""
return error_view(request, 403, _("Forbidden"),
_("You are not allowed to acces to the resource %(res)s.")
% {"res": request.path}) | 1e104b006100f296ab8f816abae8272b35c9399b | 12,325 |
def _format_param(name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError("expected {} values for {}, got {}".format(
len(optimizer.param_groups), name, len(param)))
return param
else:
return [param] * len(optimizer.param_groups) | 52904bdfb1cba7fe3175606bf77f5e46b3c7df80 | 12,326 |
def as_binary_vector(labels, num_classes):
"""
Construct binary label vector given a list of label indices.
Args:
labels (list): The input label list.
num_classes (int): Number of classes of the label vector.
Returns:
labels (numpy array): the resulting binary vector.
"""
label_arr = np.zeros((num_classes,))
for lbl in set(labels):
label_arr[lbl] = 1.0
return label_arr | 176a1148d90dcd336ea29ac13b73cc7a6c0cdc60 | 12,327 |
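A usage sketch for as_binary_vector above (assumes numpy imported as np, which the snippet itself relies on):

```python
import numpy as np

print(as_binary_vector([1, 3, 3], num_classes=5))
# [0. 1. 0. 1. 0.] -- duplicate labels only set the entry once
```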
def evaluation_lda(model, data, dictionary, corpus):
""" Compute coherence score and perplexity.
params:
model: lda model
data: list of lists (tokenized)
dictionary
corpus
returns: coherence score, perplexity score
"""
coherence_model_lda = CoherenceModel(model=model, texts=data, dictionary=dictionary, coherence='c_v')
coherence = coherence_model_lda.get_coherence()
perplexity = model.log_perplexity(corpus)
return coherence, perplexity | c38e3ed3728b9a598ec0cf36c07d606daeb8f388 | 12,328 |
def get_map_with_square(map_info, square):
"""
    Build the string representation of the map, with its top-left largest
    obstacle-free square drawn using the "full" character.
"""
map_string = ""
x_indices = list(range(square["x"], square["x"] + square["size"]))
y_indices = list(range(square["y"], square["y"] + square["size"]))
M = map_info["matrix"]
for y in range(map_info["line_num"]):
if map_string:
map_string += '\n'
for x in range(map_info["line_len"]):
if M[y][x]:
map_string += map_info["obstacle_char"]
elif x in x_indices and y in y_indices:
map_string += map_info["full_char"]
else:
map_string += map_info["empty_char"]
return map_string | 20d405edd8e5e86e943c297455ebfbeb54b669f8 | 12,329 |
def bgr_colormap():
"""
In cdict, the first column is interpolated between 0.0 & 1.0 - this indicates the value to be plotted
the second column specifies how interpolation should be done from below
the third column specifies how interpolation should be done from above
if the second column does not equal the third, then there will be a break in the colors
"""
darkness = 0.85 #0 is black, 1 is white
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, darkness, darkness),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, darkness, darkness),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
(0.5, darkness, darkness),
(1.0, 0.0, 0.0))
}
return LinearSegmentedColormap("bgr", cdict) | ffeb0d415c237a5f8cc180e86bb08d73e443b133 | 12,330 |
def autovalidation_from_docstring():
"""
Test validation using JsonSchema
The default payload is invalid, try it, then change the age to a
valid integer and try again
---
tags:
- officer
parameters:
- name: body
in: body
required: true
schema:
id: Officer
required:
- name
- age
properties:
name:
type: string
description: The officer's name.
default: "James T. Kirk"
age:
type: integer
description: The officer's age (should be integer)
default: "138"
tags:
type: array
description: optional list of tags
default: ["starfleet", "captain", "enterprise", "dead"]
items:
type: string
responses:
200:
description: A single officer item
schema:
$ref: '#/definitions/Officer'
"""
data = request.json
return jsonify(data) | 82cb9d043666b465226712e6b12be94291ac5792 | 12,331 |
import requests
def get_vlan_groups(url, headers):
"""
Get dictionary of existing vlan groups
"""
vlan_groups = []
api_url = f"{url}/api/ipam/vlan-groups/"
response = requests.request("GET", api_url, headers=headers)
all_vlan_groups = response.json()["results"]
for vlan_group in all_vlan_groups:
vlan_group_info = dict()
vlan_group_info["name"] = vlan_group["name"]
vlan_group_info["state"] = "present"
if vlan_group["site"] is not None:
vlan_group_info["site"] = vlan_group["site"]["name"]
else:
vlan_group_info["site"] = None
vlan_groups.append(vlan_group_info)
return vlan_groups | c0494708e4d2cb5b61a8e4c7ac4136051b1903c7 | 12,332 |
def getLastReading(session: Session) -> Reading:
"""
Finds the last reading associated with the session
NB: Always returns a Reading, because every Session has at least 1 Reading
Args:
session (Session): A Session object representing the session record in the database
Returns:
        Reading: the most recent Reading for the session (ordered by its 't' timestamp)
"""
return Reading.objects.filter(session_id=session.pk).order_by("t").reverse()[:1].get() | 87f9e86316bf3975077797832225bbe9b027e648 | 12,333 |
def process_outlier(data, population_set):
"""
Parameters
----------
data
population_set
Returns
-------
"""
content = list()
for date in set(map(lambda x: x['date'], data)):
tmp_item = {
"date": date,
"value": list()
}
for value in filter(lambda d: d["date"] == date, data):
tmp_value = deepcopy(value)
del tmp_value["date"]
if population_set:
tmp_value["rate"] = round(
tmp_value["value"] / population_set[tmp_value["age"]] *
RATE_PER_POPULATION_FACTOR,
1
)
tmp_item["value"].append(tmp_value)
content.append(tmp_item)
return deepcopy(content) | e793aa85bf6b14406d495775a89d37a68ae6bf8b | 12,334 |
import six
def valid_http(http_success=HTTPOk, # type: Union[Type[HTTPSuccessful], Type[HTTPRedirection]]
http_kwargs=None, # type: Optional[ParamsType]
detail="", # type: Optional[Str]
content=None, # type: Optional[JSON]
content_type=CONTENT_TYPE_JSON, # type: Optional[Str]
): # type: (...) -> Union[HTTPSuccessful, HTTPRedirection]
"""
Returns successful HTTP with standardized information formatted with content type. (see :func:`raise_http` for HTTP
error calls)
:param http_success: any derived class from *valid* HTTP codes (<400) (default: `HTTPOk`)
:param http_kwargs: additional keyword arguments to pass to `http_success` when called
:param detail: additional message information (default: empty)
:param content: json formatted content to include
:param content_type: format in which to return the exception (one of `magpie.common.SUPPORTED_ACCEPT_TYPES`)
:returns: formatted successful response with additional details and HTTP code
"""
global RAISE_RECURSIVE_SAFEGUARD_COUNT # pylint: disable=W0603
content = dict() if content is None else content
detail = repr(detail) if not isinstance(detail, six.string_types) else detail
content_type = CONTENT_TYPE_JSON if content_type == CONTENT_TYPE_ANY else content_type
http_code, detail, content = validate_params(http_success, [HTTPSuccessful, HTTPRedirection],
detail, content, content_type)
json_body = format_content_json_str(http_code, detail, content, content_type)
resp = generate_response_http_format(http_success, http_kwargs, json_body, content_type=content_type)
RAISE_RECURSIVE_SAFEGUARD_COUNT = 0 # reset counter for future calls (don't accumulate for different requests)
return resp | 6c88712cd501291fe126b87086ee29700f44832b | 12,335 |
def operating_cf(cf_df):
"""Checks if the latest reported OCF (Cashflow) is positive.
Explanation of OCF: https://www.investopedia.com/terms/o/operatingcashflow.asp
cf_df = Cashflow Statement of the specified company
"""
cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"),0]
if (cf > 0):
return True
else:
return False | ed6a849fa504b79cd65c656d9a1318aaaeed52bf | 12,336 |
from io import StringIO
def generate_performance_scores(query_dataset, target_variable, candidate_datasets, params):
"""Generates all the performance scores.
"""
performance_scores = list()
# params
algorithm = params['regression_algorithm']
cluster_execution = params['cluster']
hdfs_address = params['hdfs_address']
hdfs_user = params['hdfs_user']
inner_join = params['inner_join']
# HDFS Client
hdfs_client = None
if cluster_execution:
# time.sleep(np.random.randint(1, 120)) # avoid opening multiple sockets at the same time
hdfs_client = InsecureClient(hdfs_address, user=hdfs_user)
# reading query dataset
query_data_str = read_file(query_dataset, hdfs_client, cluster_execution)
query_data = pd.read_csv(StringIO(query_data_str))
query_data.set_index(
'key-for-ranking',
drop=True,
inplace=True
)
# build model on query data only
_, scores_before = get_performance_scores(
query_data,
target_variable,
algorithm,
False
)
for candidate_dataset in candidate_datasets:
# reading candidate dataset
candidate_data_str = read_file(candidate_dataset, hdfs_client, cluster_execution)
candidate_data = pd.read_csv(StringIO(candidate_data_str))
candidate_data.set_index(
'key-for-ranking',
drop=True,
inplace=True
)
# join dataset
join_ = query_data.join(
candidate_data,
how='left',
rsuffix='_r'
)
if inner_join:
join_.dropna(inplace=True)
# build model on joined data
# print('[INFO] Generating performance scores for query dataset %s and candidate dataset %s ...' % (query_dataset, candidate_dataset))
imputation_strategy, scores_after = get_performance_scores(
join_,
target_variable,
algorithm,
not(inner_join)
)
# print('[INFO] Performance scores for query dataset %s and candidate dataset %s done!' % (query_dataset, candidate_dataset))
performance_scores.append(
generate_output_performance_data(
query_dataset=query_dataset,
target=target_variable,
candidate_dataset=candidate_dataset,
scores_before=scores_before,
scores_after=scores_after,
imputation_strategy=imputation_strategy
)
)
return performance_scores | b8cb09973f17aab2c16515a026747c3e006bfd35 | 12,337 |
import cmath
import math
def correct_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
"""Applies the twelve term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
The twelve term correction should be a list of
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers"""
if len(sparameters_complex) != len(twelve_term_correction):
raise TypeError("s parameter and twelve term correction must be the same length")
sparameter_out=[]
phase_last=0.
for index,row in enumerate(sparameters_complex):
frequency=row[0]
Sm=np.matrix(row[1:]).reshape((2,2))
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
# frequency Edf Esf Erf Exf Elf Etf Edr Esr Err Exr Elr Etr.
# print [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]
# print Sm[0,0]
D =(1+(Sm[0,0]-Edf)*(Esf/Erf))*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf*Elr)/(Etf*Etr)
# print D
S11 =(Sm[0,0]-Edf)/(D*Erf)*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf)/(D*Etf*Etr)
S21 =((Sm[1,0]-Exr)/(D*Etf))*(1+(Sm[1,1]-Edr)*(Esr-Elf)/Err)
S12 = ((Sm[0,1]-Exf)/(D*Etr))*(1+(Sm[0,0]-Edf)*(Esf-Elr)/Erf)
S22 = (Sm[1,1]-Edr)/(D*Err)*(1+(Sm[0,0]-Edf)*(Esf/Erf))-(Sm[0,1]*Sm[1,0]*Elr)/(D*Etf*Etr)
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
# if the phase jumps by >180 but less than 270, then pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
if reciprocal:
sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
else:
sparameter_out.append([frequency,S11,S21,S12,S22])
phase_last=cmath.phase(mean_S12_S21)
return sparameter_out | e957c8eebd905b93b45e79a7349c1fca895c5430 | 12,338 |
def api_activity_logs(request):
"""Test utility."""
auth = get_auth(request)
obj = ActivityLogs(auth=auth)
check_apiobj(authobj=auth, apiobj=obj)
return obj | 7b13f382e71971b6ed93154a591a27f95fd81a2c | 12,339 |
def RNAshapes_parser(lines=None,order=True):
"""
Returns a list containing tuples of (sequence,pairs object,energy) for
every sequence
[[Seq,Pairs,Ene],[Seq,Pairs,Ene],...]
    Structures will be ordered by the structure energy by default; if ordering
    isn't desired, set order to False
"""
result = lineParser(lines)
if order:
result = order_structs(result)
return result | 3c45a4f6efb190cb26512dea4a55c44292191e0f | 12,340 |
from typing import Callable
from typing import Optional
from typing import Union
from typing import Dict
from typing import Any
def get_case_strategy( # pylint: disable=too-many-locals
draw: Callable,
operation: APIOperation,
hooks: Optional[HookDispatcher] = None,
data_generation_method: DataGenerationMethod = DataGenerationMethod.default(),
path_parameters: Union[NotSet, Dict[str, Any]] = NOT_SET,
headers: Union[NotSet, Dict[str, Any]] = NOT_SET,
cookies: Union[NotSet, Dict[str, Any]] = NOT_SET,
query: Union[NotSet, Dict[str, Any]] = NOT_SET,
body: Any = NOT_SET,
) -> Any:
"""A strategy that creates `Case` instances.
Explicit `path_parameters`, `headers`, `cookies`, `query`, `body` arguments will be used in the resulting `Case`
object.
If such explicit parameters are composite (not `body`) and don't provide the whole set of parameters for that
location, then we generate what is missing and merge these two parts. Note that if parameters are optional, then
they may remain absent.
The primary purpose of this behavior is to prevent sending incomplete explicit examples by generating missing parts
as it works with `body`.
"""
to_strategy = DATA_GENERATION_METHOD_TO_STRATEGY_FACTORY[data_generation_method]
context = HookContext(operation)
with detect_invalid_schema(operation):
path_parameters_value = get_parameters_value(
path_parameters, "path", draw, operation, context, hooks, to_strategy
)
headers_value = get_parameters_value(headers, "header", draw, operation, context, hooks, to_strategy)
cookies_value = get_parameters_value(cookies, "cookie", draw, operation, context, hooks, to_strategy)
query_value = get_parameters_value(query, "query", draw, operation, context, hooks, to_strategy)
media_type = None
if body is NOT_SET:
if operation.body:
parameter = draw(st.sampled_from(operation.body.items))
strategy = _get_body_strategy(parameter, to_strategy, operation)
strategy = apply_hooks(operation, context, hooks, strategy, "body")
media_type = parameter.media_type
body = draw(strategy)
else:
media_types = operation.get_request_payload_content_types() or ["application/json"]
# Take the first available media type.
# POSSIBLE IMPROVEMENT:
# - Test examples for each available media type on Open API 2.0;
# - On Open API 3.0, media types are explicit, and each example has it.
# We can pass `OpenAPIBody.media_type` here from the examples handling code.
media_type = media_types[0]
if operation.schema.validate_schema and operation.method.upper() == "GET" and operation.body:
raise InvalidSchema("Body parameters are defined for GET request.")
return Case(
operation=operation,
media_type=media_type,
path_parameters=path_parameters_value,
headers=CaseInsensitiveDict(headers_value) if headers_value is not None else headers_value,
cookies=cookies_value,
query=query_value,
body=body,
data_generation_method=data_generation_method,
) | d46fde928b0ceaa3886904e35876c245e7fcb245 | 12,341 |
def type_from_value(value, visitor=None, node=None):
"""Given a Value from resolving an annotation, return the type."""
ctx = _Context(visitor, node)
return _type_from_value(value, ctx) | 92568581d8f7b47ac469d0575f549acb1b67c857 | 12,342 |
def _accesslen(data) -> int:
"""This was inspired by the `default_collate` function.
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/
"""
if isinstance(data, (tuple, list)):
item = data[0]
if not isinstance(item, (float, int, str)):
return len(item)
return len(data) | df709ee8a97c920a1413c9d7240f83d0406577a6 | 12,343 |
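Two illustrative calls to _accesslen above, showing both branches:

```python
print(_accesslen([[1, 2, 3], [4, 5, 6]]))  # 3 -- first item is a sequence, so its length is used
print(_accesslen([1.0, 2.0, 3.0, 4.0]))    # 4 -- first item is a scalar, so len(data) is used
```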
def createSkill(request, volunteer_id):
"""
Method to create skills and interests
:param request:
:param volunteer_id:
:return:
"""
if request.method == 'POST':
volunteer = Volunteer_User_Add_Ons.objects.get(pk=volunteer_id)
skills = request.POST.getlist('skills')
interests = request.POST.getlist('interests')
# call to create the skills
createInputToken(request, skills, 'Skill', volunteer_id)
# call to create the interests
createInputToken(request, interests, 'Interest', volunteer_id)
return HttpResponse('ok') | f612ef94b02664526018fd2ea948a36587cb15bf | 12,344 |
def analyticJacobian(robot : object, dq = 0.001, symbolic = False):
"""Using Homogeneous Transformation Matrices, this function computes Analytic Jacobian Matrix of a serial robot given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
Args:
robot (Serial): serial robot (this won't work with other type of robots)
dq (float, optional): step size for numerical derivative. Defaults to 0.001.
symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
Returns:
J (np.array): Inertial Analytic Jacobian Matrix (numerical)
J (SymPy Matrix): Inertial Analytic Jacobian Matrix (symbolical)
"""
# Calculate forward kinematics: f(q)
fkHTM = forwardHTM(robot, symbolic)
# Convert result into an Axis - Angle vector: x(q)
x = axisAngle(fkHTM[-1], symbolic)
if symbolic:
# Calculate Analytic Jacobian Matrix by differentiating Axis - Angle vector with SymPy functions
return nsimplify(trigsimp(x.jacobian(robot.qSymbolic)).evalf(), tolerance = 1e-10)
else:
# Get number of joints (generalized coordinates)
n = robot.jointsPositions.shape[0]
# Initializes jacobian matrix with zeros
J = np.zeros((6, n))
        # Auxiliary variable to keep the original joint positions
q = robot.jointsPositions.copy()
# Iterates through all colums (generalized coordinates)
for j in range(n):
# Set increment to current generalized coordinate: z[j] = q[j] + dq
robot.jointsPositions[j] += dq
# Calculate forward kinematics with step size: f(z) = f(q + dq)
f = forwardHTM(robot)
# Convert result into an Axis - Angle vector: X(q + dq)
X = axisAngle(f[-1])
# Calculate analytic jacobian matrix: [X(q + dq) - x(q)] / dq
J[: , j] = ((X - x) / dq).flatten()
            # Eliminate the step size by restoring original values from the auxiliary variable
robot.jointsPositions[:, :] = q
return J | a906148f26fea9bb9d833ac95dffde87a704e372 | 12,345 |
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5 | 829ad4fafb32cb18d8da7b8144be25746f892ce5 | 12,346 |
import os
import inspect
def get_subtask_spec_factory_classes():
"""Return dictionary with all factory classes defined in files in this directory.
This file is excluded from the search."""
this_file = os.path.split(__file__)[-1]
directory = os.path.dirname(__file__)
exclude = [this_file, "subtask_spec_factory.py"]
factory_files = [f for f in os.listdir(directory)
if f.endswith(".py") and f not in exclude]
factory_classes = {}
for f in factory_files:
path = os.path.join(directory, f)
relative_import_string = "." + inspect.getmodulename(path)
module = import_module(relative_import_string, package=__package__)
for name in dir(module):
obj = getattr(module, name)
if inspect.isclass(obj):
if issubclass(obj, SubtaskSpecFactory):
factory_classes[name] = obj
return factory_classes | 500e30e5ed3ba8a7aad660ad80fa123485a9f9b1 | 12,347 |
def triu_indices_from(arr, k=0):
"""
Returns the indices for the upper-triangle of `arr`.
Args:
arr (Union[Tensor, list, tuple]): 2-dimensional array.
k (int, optional): Diagonal offset, default is 0.
Returns:
triu_indices_from, tuple of 2 tensor, shape(N)
Indices for the upper-triangle of `arr`.
Raises:
TypeError: If `arr` cannot be converted to tensor, or `k` is not a number.
ValueError: If `arr` cannot be converted to a 2-dimensional tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> tensor = np.ones((3,3))
>>> print(np.triu_indices_from(tensor))
(Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
"""
arr = asarray(arr)
if arr.ndim != 2:
_raise_value_error("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) | b95a7ed3fac1810bdfe9659471cbcd2d14fc8c99 | 12,348 |
def is_debug():
"""Return true if xylem is set to debug console output."""
global _debug
return _debug | b605a56645aecbd995142d964efd1935ade29673 | 12,349 |
def func(var):
"""Function"""
return var + 1 | a6ca4247f7f7307c384708ed9535046e4ec7d4e3 | 12,350 |
def flanking_regions_fasta_deletion(genome, dataframe, flanking_region_size):
"""
Makes batch processing possible, pulls down small region
of genome for which to design primers around.
This is based on the chromosome and position of input file.
Each Fasta record will contain:
>Sample_Gene_chr:posStart-posStop
Seq of flanking region upstream of SV + seq of flanking region downstream of SV
Args:
genome (list): genome list of tuples (header, seq).
dataframe (pandas object): dataframe with sample info.
flanking_region_size (int): length of sequence upstream and downstream of
input coordinate position to pull as sequence to design primers around.
"""
output = []
for headers, seqs in genome:
chrm = str(headers)
seq = str(seqs)
for gene, sample, chrom, start, stop in zip(dataframe.Gene, dataframe.Sample, dataframe.Chr,
dataframe.PosStart, dataframe.PosStop):
if str(chrom) == chrm:
header = str(str(sample)+"_"+str(gene)+"_"+\
str(chrom)+":"+str(start)+"-"+str(stop)+"__")
flank_seq = seq[int(start)-int(flanking_region_size):int(start)+1]\
+seq[int(stop):(int(stop)+int(flanking_region_size))]
output.append((header, flank_seq.upper()))
return output | a20da206630d1f2fb002c5ca63eab9f240b1f1d5 | 12,351 |
import functools
def numpy_episodes(
train_dir, test_dir, shape, loader, preprocess_fn=None, scan_every=10,
num_chunks=None, **kwargs):
"""Read sequences stored as compressed Numpy files as a TensorFlow dataset.
Args:
train_dir: Directory containing NPZ files of the training dataset.
test_dir: Directory containing NPZ files of the testing dataset.
shape: Tuple of batch size and chunk length for the datasets.
    loader: Episode reading strategy; one of 'scan', 'reload' or 'dummy'.
    preprocess_fn: Optional function applied to the image tensors on the CPU.
    scan_every: Forwarded to the 'scan' loader as its 'every' argument.
    num_chunks: Optional fixed number of chunks to cut from each sequence.
**kwargs: Keyword arguments to forward to the read episodes implementation.
Returns:
Structured data from numpy episodes as Tensors.
"""
try:
dtypes, shapes = _read_spec(train_dir, **kwargs)
except ZeroDivisionError:
dtypes, shapes = _read_spec(test_dir, **kwargs)
loader = {
'scan': functools.partial(_read_episodes_scan, every=scan_every),
'reload': _read_episodes_reload,
'dummy': _read_episodes_dummy,
}[loader]
train = tf.data.Dataset.from_generator(
functools.partial(loader, train_dir, shape[0], **kwargs), dtypes, shapes)
test = tf.data.Dataset.from_generator(
functools.partial(loader, test_dir, shape[0], **kwargs), dtypes, shapes)
chunking = lambda x: tf.data.Dataset.from_tensor_slices(
# Returns dict of image, action, reward, length tensors with num_chunks in 0 dim.
chunk_sequence(x, shape[1], True, num_chunks))
def sequence_preprocess_fn(sequence):
if preprocess_fn:
with tf.device('/cpu:0'):
sequence['image'] = preprocess_fn(sequence['image'])
return sequence
# This transformation (flat_map):
# 1. Chunk each sequence,
# 2. From each sequence one can get variable number of chunks
# (first dim. of a tensor is chunks number, like with batches).
# Flatten to get the dataset of chunks.
train = train.flat_map(chunking)
train = train.shuffle(100 * shape[0])
train = train.batch(shape[0], drop_remainder=True)
train = train.map(sequence_preprocess_fn, 10).prefetch(20)
test = test.flat_map(chunking)
test = test.shuffle(100 * shape[0])
test = test.batch(shape[0], drop_remainder=True)
test = test.map(sequence_preprocess_fn, 10).prefetch(20)
return attr_dict.AttrDict(train=train, test=test) | fd9c727c64bdd725ef1615754d12b93f21568c2f | 12,352 |
def fft_convolve(ts, query):
"""
Computes the sliding dot product for query over the time series using
the quicker FFT convolution approach.
Parameters
----------
ts : array_like
The time series.
query : array_like
The query.
Returns
-------
array_like - The sliding dot product.
"""
n = len(ts)
m = len(query)
x = np.fft.fft(ts)
y = np.append(np.flipud(query), np.zeros([1, n - m]))
y = np.fft.fft(y)
z = np.fft.ifft(x * y)
return np.real(z[m - 1:n]) | 7e1fec2a3b30770909d7c185bbc0b4885cb7eb22 | 12,353 |
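A sanity-check sketch for fft_convolve above, comparing it against a direct sliding dot product (assumes numpy imported as np, as the snippet does):

```python
import numpy as np

ts = np.random.rand(16)
query = np.random.rand(4)

fast = fft_convolve(ts, query)

# Direct sliding dot product over every window of len(query).
naive = np.array([np.dot(ts[i:i + len(query)], query)
                  for i in range(len(ts) - len(query) + 1)])

print(np.allclose(fast, naive))  # True
```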
from typing import List
from typing import Optional
def _add_merge_gvcfs_job(
b: hb.Batch,
gvcfs: List[hb.ResourceGroup],
output_gvcf_path: Optional[str],
sample_name: str,
) -> Job:
"""
Combine by-interval GVCFs into a single sample GVCF file
"""
job_name = f'Merge {len(gvcfs)} GVCFs, {sample_name}'
j = b.new_job(job_name)
j.image(PICARD_IMAGE)
j.cpu(2)
java_mem = 7
j.memory('standard') # ~ 4G/core ~ 7.5G
j.storage(f'{len(gvcfs) * 1.5 + 2}G')
j.declare_resource_group(
output_gvcf={
'g.vcf.gz': '{root}-' + sample_name + '.g.vcf.gz',
'g.vcf.gz.tbi': '{root}-' + sample_name + '.g.vcf.gz.tbi',
}
)
input_cmd = ' '.join(f'INPUT={g["g.vcf.gz"]}' for g in gvcfs)
j.command(
f"""set -e
(while true; do df -h; pwd; du -sh $(dirname {j.output_gvcf['g.vcf.gz']}); free -m; sleep 300; done) &
java -Xms{java_mem}g -jar /usr/picard/picard.jar \
MergeVcfs {input_cmd} OUTPUT={j.output_gvcf['g.vcf.gz']}
df -h; pwd; du -sh $(dirname {j.output_gvcf['g.vcf.gz']}); free -m
"""
)
if output_gvcf_path:
b.write_output(j.output_gvcf, output_gvcf_path.replace('.g.vcf.gz', ''))
return j | d89fd051cd20bef7263b600ce3513ba858acbadd | 12,354 |
def register_permission(name, codename, ctypes=None):
"""Registers a permission to the framework. Returns the permission if the
registration was successfully, otherwise False.
**Parameters:**
name
The unique name of the permission. This is displayed to the customer.
codename
The unique codename of the permission. This is used internally to
identify the permission.
    ctypes
        The models for which the permission is active. This can be used to
        display only reasonable permissions for an object. Their Django
        ContentTypes are looked up internally.
"""
if ctypes is None:
ctypes = []
# Permission with same codename and/or name must not exist.
if Permission.objects.filter(Q(name=name) | Q(codename=codename)):
return False
p = Permission.objects.create(name=name, codename=codename)
ctypes = [ContentType.objects.get_for_model(ctype) for ctype in ctypes]
if ctypes:
p.content_types = ctypes
p.save()
return p | f09766685ac4690bd72739450977646d521a21d0 | 12,355 |
def calculate_outliers(tile_urls, num_outliers, cache, nprocs):
"""
Fetch tiles and calculate the outlier tiles per layer.
The number of outliers is per layer - the largest N.
Cache, if true, uses a local disk cache for the tiles. This can be very
useful if re-running percentile calculations.
Nprocs is the number of processes to use for both fetching and aggregation.
Even on a system with a single CPU, it can be worth setting this to a
    larger number to make concurrent network requests for tiles.
"""
def factory_fn():
return LargestN(num_outliers, cache)
if nprocs > 1:
results = parallel(
tile_urls, FactoryFunctionHolder(factory_fn), nprocs)
else:
results = sequential(tile_urls, factory_fn)
return results | 6e72820de2f954a9e349aa40d165817b3ab7c012 | 12,356 |
import random
def load_trigger_dataset(
fname,
templatizer,
limit=None,
train=False,
preprocessor_key=None,
priming_dataset=None,
max_priming_examples=64,
):
"""
Loads a MLM classification dataset.
Parameters
==========
fname : str
The filename.
templatizer : Templatizer
Maps instances to cloze-style model inputs.
limit : int
(optional) Limit the amount of data loaded.
train : bool
Whether the data is used for training. Default: False.
preprocessor_key : str
Key used to lookup preprocessor for data.
"""
if preprocessor_key is None:
preprocessor = PREPROCESSORS[fname.split('.')[-1]]
else:
preprocessor = PREPROCESSORS[preprocessor_key]
instances = []
for x in preprocessor(fname, train=train):
try:
model_inputs, label_id = templatizer(x, train=train)
if priming_dataset is not None:
model_inputs, label_id = prime(
model_inputs,
label_id,
priming_dataset,
model_max_length=templatizer._tokenizer.model_max_length,
max_priming_examples=max_priming_examples,
)
except ValueError as e:
logger.warning('Encountered error "%s" when processing "%s". Skipping.', e, x)
continue
else:
instances.append((model_inputs, label_id))
if limit:
limit = min(len(instances), limit)
return random.sample(instances, limit)
return instances | 6ed4970dd0031bd33cf19414f439c69e5d5a079a | 12,357 |
def pmu2bids(physio_files, verbose=False):
"""
Function to read a list of Siemens PMU physio files and
save them as a BIDS physiological recording.
Parameters
----------
physio_files : list of str
list of paths to files with a Siemens PMU recording
verbose : bool
verbose flag
Returns
-------
physio : PhysioData
PhysioData with the contents of the file
"""
# In case we are handled just a single file, make it a one-element list:
if isinstance(physio_files, str):
physio_files = [physio_files]
# Init PhysioData object to hold physio signals:
physio = PhysioData()
# Read the files from the list, extract the relevant information and
# add a new PhysioSignal to the list:
for f in physio_files:
physio_type, MDHTime, sampling_rate, physio_signal = readpmu(f, verbose=verbose)
testSamplingRate(
sampling_rate = sampling_rate,
Nsamples = len(physio_signal),
logTimes=MDHTime
)
# specify label:
if 'PULS' in physio_type:
physio_label = 'cardiac'
elif 'RESP' in physio_type:
physio_label = 'respiratory'
elif "TRIGGER" in physio_type:
physio_label = 'trigger'
else:
physio_label = physio_type
physio.append_signal(
PhysioSignal(
label=physio_label,
units='',
samples_per_second=sampling_rate,
physiostarttime=MDHTime[0],
signal=physio_signal
)
)
return physio | 41e607c80955689e5a189652ba445bf0014a3893 | 12,358 |
def add_chain(length):
"""Adds a chain to the network so that"""
chained_works = []
chain = utils.generate_chain(length)
for i in range(len(chain)-1):
agent_id = get_random_agent().properties(ns.KEY_AGENT_ID).value().next()
work_id = g.create_work().properties(ns.KEY_WORK_ID).value().next()
g.agent(agent_id).owns_work(g.work(work_id)).next()
item1 = g.create_item(chain[i])
g.agent(agent_id).works(work_id).demands(item1).next()
item2 = g.create_item(chain[i+1])
g.agent(agent_id).works(work_id).offers(item2).next()
chained_works.append(work_id)
return chained_works | 80a176fb34460404c847f00dbeab963f1a0be71e | 12,359 |
from vartools.result import re_fit_data
import sys
def re_fit(file_name, top_c, bot_c):
""" re-fits a prepared oocyte file (-t and -b flags for top and bot constraints)"""
if top_c == "True":
top_c = True
elif top_c == "False":
top_c = False
else:
sys.exit("Invalid option: " + top_c)
if bot_c == "True":
bot_c = True
elif bot_c == "False":
bot_c = False
else:
sys.exit("Invalid option: " + bot_c)
re_fit_data(file_name, top_c, bot_c)
return None | e231c2415ce0ea54fd349f79f78783f71911bd8c | 12,360 |
def convert_graph_to_db_format(input_graph: nx.Graph, with_weights=False, cast_to_directed=False):
"""Converts a given graph into a DB format, which consists of two or three lists
1. **Index list:** a list where the i-th position contains the index of the beginning of the list of adjacent nodes (in the second list).
2. **Node list:** for each node, we list (in order) all the nodes which are adjacent to it.
3. **Weight list:** if the weight parameter is True, includes the weights of the edges, corresponds to the nodes list
**Assumptions:**
The code has several preexisting assumptions:
a) The nodes are labeled with numbers
b) Those numbers are the sequence [0,...,num_of_nodes-1]
c) If there are weights, they are floats
d) If there are weights, they are initialized for all edges
e) If there are weights, the weight key is 'weight'
.. Note::
The code behaves differently for directed and undirected graphs.
    For an undirected graph, every edge is actually counted twice (p->q and q->p).
Example::
For the simple directed graph (0->1, 0->2,0->3,2->0,3->1,3->2):
`Indices: [0, 3, 3, 4, 6]`
`Neighbors: [1, 2, 3, 0, 1, 2]`
    Note that index[1] is the same as index[2]. That is because node 1 has no neighbors, so its neighbor list has size 0, but we still need an index entry for that node.
For the same graph when it is undirected:
`Indices: [0, 3, 5, 7, 10]`
`Neighbors: [1, 2, 3, 0, 3, 0, 3, 0, 1, 2]`
Note that the number of edges isn't doubled because in the directed version there is a bidirectional edge.
    :param input_graph: the nx.Graph object to convert
:param with_weights: whether to create a weight list. Defaults to False.
:param cast_to_directed: whether to cast the graph into a directed format
:return: two or three lists: index,nodes, [weights]
"""
if cast_to_directed:
graph = input_graph.to_directed()
else:
graph = input_graph.copy()
if graph.is_directed():
# Color printing taken from https://www.geeksforgeeks.org/print-colors-python-terminal/
print("\033[93m {}\033[00m".format('Note that the graph is processed as a directed graph'))
indices = [0] # The first neighbor list always starts at index 0
neighbor_nodes = []
nodes = [node for node in graph.nodes()]
# print(nodes)
nodes.sort()
neighbors = [sorted([x for x in graph.neighbors(node)]) for node in nodes]
# Create the indices and neighbor nodes lists
for neighbor_list in neighbors:
neighbor_list.sort()
# print(neighbor_list)
neighbor_nodes.extend(neighbor_list)
indices.append(indices[-1] + len(neighbor_list))
if with_weights:
try:
weights = [0] * len(neighbor_nodes)
current_index = 0
for node in nodes:
for x in neighbors[node]:
w = graph[node][x]['weight']
weights[current_index] = w
current_index += 1
return indices, neighbor_nodes, weights
except KeyError:
# Print in red
print("\033[91m {}\033[00m".format('No weights defined, returning an empty list of weights'))
print()
return indices, neighbor_nodes, []
return indices, neighbor_nodes | 3f538f697df16b13aeb513dd60831a1252fffb6c | 12,361 |
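
# Illustrative usage sketch for convert_graph_to_db_format (hypothetical example):
# rebuild the directed graph from the docstring above and check the resulting lists.
import networkx as nx

example_graph = nx.DiGraph()
example_graph.add_edges_from([(0, 1), (0, 2), (0, 3), (2, 0), (3, 1), (3, 2)])
indices, neighbors = convert_graph_to_db_format(example_graph)
print(indices)    # [0, 3, 3, 4, 6]
print(neighbors)  # [1, 2, 3, 0, 1, 2]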
def auxiliary_subfields():
"""Factory associated with AuxSubfieldsPoroelasticity.
"""
return AuxSubfieldsPoroelasticity() | bcbdaf5b6ee006a6380206ebd331f7e516593b83 | 12,362 |
def cassandra_get_unit_data():
"""
    Basic function to obtain units from the DB and return them as a dict
:return: dictionary of units
"""
kpi_dict = {}
cassandra_cluster = Cluster()
session = cassandra_cluster.connect('pb2')
query = session.prepare('SELECT * FROM kpi_units')
query_data = session.execute(query)
for row in query_data:
kpi_dict[row[1]] = [row[0], row[2], row[3], row[4]]
return kpi_dict | ab24e4e09f648a74cd16a140279da54aab3d4096 | 12,363 |
def read_cfg_float(cfgp, section, key, default):
"""
Read float from a config file
Args:
cfgp: Config parser
section: [section] of the config file
key: Key to be read
default: Value if couldn't be read
Returns: Resulting float
"""
if cfgp.has_option(section, key):
return cfgp.getfloat(section, key)
else:
return default | 0ed341c2d1436e3378e4e126735ac7306973ca8c | 12,364 |
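
# Illustrative usage sketch for read_cfg_float, assuming a small in-memory config:
from configparser import ConfigParser

_cfgp = ConfigParser()
_cfgp.read_string("[fit]\nthreshold = 0.75\n")
print(read_cfg_float(_cfgp, "fit", "threshold", 0.5))  # 0.75
print(read_cfg_float(_cfgp, "fit", "missing", 0.5))    # falls back to 0.5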
def random(website):
"""
    Get a random cookie for the given website.
    :param website: name of the site to query, e.g. weibo
    :return: a randomly selected cookie
"""
g = get_conn()
cookies = getattr(g, website + '_cookies').random()
return cookies | 6db8d81f18e57af2a7d9294481e45d4ad38962ce | 12,365 |
import requests
def get_pid(referral_data):
""" Example getting PID using the same token used to query AD
NOTE! to get PID the referral information must exist in the BETA(UAT) instance of TOMS
"""
referral_uid = referral_data['referral_uid']
url = "https://api.beta.genomics.nhs.uk/reidentification/referral-pid/{referral_uid}".format(referral_uid=referral_uid)
auth_header = {'Authorization': 'Bearer {}'.format(jwt_token)}
pid = requests.get(url, headers=auth_header).json()
return pid | 8e5e43c1a2c85826e03f0fd090fc235b0320aed7 | 12,366 |
from typing import Union
from pathlib import Path
from typing import Tuple
from typing import List
from datetime import datetime
def open_events(
fname: Union[Path, str], leap_sec: float, get_frame_rate: bool = False
) -> Tuple[
List[float], List[float], List[float], List[datetime], Union[List[float], None]
]:
"""
Parameters
----------
fname : Path or str
filename of *_events.pos file
leap_sec : float
The current leap second used to convert GPS time to UTC time
get_frame_rate : bool [default=False]
Whether to return the frame rate of sequential trigger events
Returns
-------
lat : List[float]
Latitudes (decimal degrees) of trigger events recorded by Reach M2
lon : List[float]
Longitudes (decimal degrees) of trigger events recorded by Reach M2
height : List[float]
Ellipsoid heights of trigger events recorded by Reach M2
dt_ls : List[datetime]
datetime (UTC) of trigger events recorded by Reach M2
reach_frate : List[float] or None
if get_frame_rate is True:
reach_frate -> frame rate (seconds) of trigger events recorded
by Reach M2
if get_frame_rate is False:
reach_frate = None
"""
with open(fname, encoding="utf-8") as fid:
contents = fid.readlines()
lat, lon, height, dt_ls = [], [], [], []
reach_frate = [] if get_frame_rate else None
cnt = 0
for i in range(len(contents)):
if contents[i].startswith("%"):
continue
row = contents[i].strip().split()
dt = datetime_from_event_text(row[0], row[1], leap_sec)
if cnt > 0:
reach_frate.append((dt - prev_dt).total_seconds()) # noqa
lat.append(float(row[2]))
lon.append(float(row[3]))
height.append(float(row[4]))
dt_ls.append(dt)
prev_dt = dt # noqa
cnt += 1
return lat, lon, height, dt_ls, reach_frate | 973b835b1df2aafba1a535b378434b6a532584d0 | 12,367 |
def intdags_permutations(draw, min_size:int=1, max_size:int=10):
""" Produce instances of a same DAG. Instances are not nesessarily
topologically sorted """
return draw(lists(permutations(draw(intdags())),
min_size=min_size,
max_size=max_size)) | 50377412dbd091afa98761e673a35f44acbeb60d | 12,368 |
def getConfiguredGraphClass(doer):
"""
In this class method, we must return a configured graph class
"""
# if options.bReified:
# DU_GRAPH = Graph_MultiSinglePageXml_Segmenter_Separator_DOM
if options.bSeparator:
DU_GRAPH = ConjugateSegmenterGraph_MultiSinglePageXml_Separator
else:
DU_GRAPH = ConjugateSegmenterGraph_MultiSinglePageXml
ntClass = My_ConjugateNodeType
if options.bBB2:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
                     , []                 # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun = None
, bPreserveWidth=True
)
elif options.bBB31:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
                     , []                 # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun = (None, lambda v: v * 0.066*3) # shrink to 60% of its size
, bPreserveWidth=True
)
else:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
                     , []                 # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun =lambda v: max(v * 0.066, min(5, v/3)) #we reduce overlap in this way
)
nt.setLabelAttribute("id")
## HD added 23/01/2020: needed for output generation
DU_GRAPH.clusterType='paragraph'
nt.setXpathExpr(( ".//pc:TextLine"
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(nt)
return DU_GRAPH | 3089572eb1aa4e7db505b5211d156d3e044aaed5 | 12,369 |
def find_amped_polys_for_syntheticidle(qubit_filter, idleStr, model, singleQfiducials=None,
prepLbl=None, effectLbls=None, initJ=None, initJrank=None,
wrtParams=None, algorithm="greedy", require_all_amped=True,
idtPauliDicts=None, comm=None, verbosity=0):
"""
Find fiducial pairs which amplify the parameters of a synthetic idle gate.
This routine is primarily used internally within higher-level n-qubit
sequence selection routines.
Parameters
----------
qubit_filter : list
A list specifying which qubits fiducial pairs should be placed upon.
Typically this is a subset of all the qubits, as the synthetic idle
is composed of nontrivial gates acting on a localized set of qubits
and noise/errors are localized around these.
idleStr : Circuit
The operation sequence specifying the idle operation to consider. This may
just be a single idle gate, or it could be multiple non-idle gates
which together act as an idle.
model : Model
The model used to compute the polynomial expressions of probabilities
to first-order. Thus, this model should always have (simulation)
type "termorder".
singleQfiducials : list, optional
A list of gate-name tuples (e.g. `('Gx',)`) which specify a set of single-
qubit fiducials to use when trying to amplify gate parameters. Note that
no qubit "state-space" label is required here (i.e. *not* `(('Gx',1),)`);
the tuples just contain single-qubit gate *names*. If None, then
`[(), ('Gx',), ('Gy',)]` is used by default.
prepLbl : Label, optional
The state preparation label to use. If None, then the first (and
usually the only) state prep label of `model` is used, so it's
usually fine to leave this as None.
effectLbls : list, optional
The list of POVM effect labels to use, as a list of `Label` objects.
These are *simplified* POVM effect labels, so something like "Mdefault_0",
and if None the default is all the effect labels of the first POVM of
`model`, which is usually what you want.
initJ : numpy.ndarray, optional
An initial Jacobian giving the derivatives of some other polynomials
with respect to the same `wrtParams` that this function is called with.
This acts as a starting point, and essentially informs the fiducial-pair
selection algorithm that some parameters (or linear combos of them) are
*already* amplified (e.g. by some other germ that's already been
selected) and for which fiducial pairs are not needed.
initJrank : int, optional
The rank of `initJ`. The function could compute this from `initJ`
but in practice one usually has the rank of `initJ` lying around and
so this saves a call to `np.linalg.matrix_rank`.
wrtParams : slice, optional
The parameters to consider for amplification. (This function seeks
fiducial pairs that amplify these parameters.) If None, then pairs
which amplify all of `model`'s parameters are searched for.
algorithm : {"greedy","sequential"}
Which algorithm is used internally to find fiducial pairs. "greedy"
will give smaller sets of fiducial pairs (better) but takes longer.
Usually it's worth the wait and you should use the default ("greedy").
require_all_amped : bool, optional
        If True, an AssertionError is raised when fewer than all of the
        requested parameters (in `wrtParams`) are amplified by the final set of
        fiducial pairs.
verbosity : int, optional
The level of detail printed to stdout. 0 means silent.
Returns
-------
J : numpy.ndarray
The final jacobian with rows equal to the number of chosen amplified
polynomials (note there is one row per fiducial pair *including* the
outcome - so there will be two different rows for two different
outcomes) and one column for each parameter specified by `wrtParams`.
Jrank : int
The rank of the jacobian `J`, equal to the number of amplified
parameters (at most the number requested).
fidpair_lists : list
The selected fiducial pairs, each in "gatename-fidpair-list" format.
Elements of `fidpair_lists` are themselves lists, all of length=#qubits.
Each element of these lists is a (prep1Qnames, meas1Qnames) 2-tuple
specifying the 1-qubit gates (by *name* only) on the corresponding qubit.
For example, the single fiducial pair prep=Gx:1Gy:2, meas=Gx:0Gy:0 in a
3-qubit system would have `fidpair_lists` equal to:
`[ [ [(),('Gx','Gy')], [('Gx',), () ], [('Gy',), () ] ] ]`
` < Q0 prep,meas >, < Q1 prep,meas >, < Q2 prep,meas >`
"""
#Note: "useful" fiducial pairs are identified by looking at the rank of a
# Jacobian matrix. Each row of this Jacobian is the derivative of the
# "amplified polynomial" - the L=1 polynomial for a fiducial pair (i.e.
# pr_poly(F1*(germ)*F2) ) minus the L=0 polynomial (i.e. pr_poly(F1*F2) ).
# When the model only gives probability polynomials to first order in
# the error rates this gives the L-dependent and hence amplified part
# of the polynomial expression for the probability of F1*(germ^L)*F2.
# This derivative of an amplified polynomial, taken with respect to
# all the parameters we care about (i.e. wrtParams) would ideally be
# kept as a polynomial and the "rank" of J would be the number of
# linearly independent polynomials within the rows of J (each poly
# would be a vector in the space of polynomials). We currently take
# a cheap/HACK way out and evaluate the derivative-polynomial at a
# random dummy value which should yield linearly dependent vectors
# in R^n whenever the polynomials are linearly indepdendent - then
# we can use the usual scipy/numpy routines for computing a matrix
# rank, etc.
# Assert that model uses termorder, as doing L1-L0 to extract the "amplified" part
# relies on only expanding to *first* order.
assert(model._sim_type == "termorder" and model._sim_args['max_order'] == 1), \
'`model` must use "termorder:1" simulation type!'
printer = _VerbosityPrinter.build_printer(verbosity, comm)
if prepLbl is None:
prepLbl = model._shlp.get_default_prep_lbl()
if effectLbls is None:
povmLbl = model._shlp.get_default_povm_lbl(sslbls=None)
effectLbls = [_Lbl("%s_%s" % (povmLbl, l))
for l in model._shlp.get_effect_labels_for_povm(povmLbl)]
if singleQfiducials is None:
# TODO: assert model has Gx and Gy gates?
singleQfiducials = [(), ('Gx',), ('Gy',)] # ('Gx','Gx')
#dummy = 0.05*_np.ones(model.num_params(),'d') # for evaluating derivs...
#dummy = 0.05*_np.arange(1,model.num_params()+1) # for evaluating derivs...
#dummy = 0.05*_np.random.random(model.num_params())
dummy = 5.0 * _np.random.random(model.num_params()) + 0.5 * _np.ones(model.num_params(), 'd')
# expect terms to be either coeff*x or coeff*x^2 - (b/c of latter case don't eval at zero)
#amped_polys = []
selected_gatename_fidpair_lists = []
if wrtParams is None: wrtParams = slice(0, model.num_params())
Np = _slct.length(wrtParams)
if initJ is None:
J = _np.empty((0, Np), 'complex'); Jrank = 0
else:
J = initJ; Jrank = initJrank
if algorithm == "greedy":
Jrows = _np.empty((len(effectLbls), Np), 'complex')
#Outer iteration
while Jrank < Np:
if algorithm == "sequential":
printer.log("Sequential find_amped_polys_for_syntheticidle started. Target rank=%d" % Np)
assert(comm is None), "No MPI support for algorithm='sequential' case!"
elif algorithm == "greedy":
maxRankInc = 0
bestJrows = None
printer.log("Greedy find_amped_polys_for_syntheticidle started. Target rank=%d" % Np)
else: raise ValueError("Invalid `algorithm` argument: %s" % algorithm)
# loop over all possible (remaining) fiducial pairs
nQubits = len(qubit_filter)
loc_Indices, _, _ = _mpit.distribute_indices(
list(range(len(singleQfiducials)**nQubits)), comm, False)
loc_itr = 0; nLocIters = len(loc_Indices)
#print("DB: Rank %d indices = " % comm.Get_rank(), loc_Indices)
with printer.progress_logging(2):
for itr, prep in enumerate(_itertools.product(*([singleQfiducials] * nQubits))):
# There's probably a cleaner way to do this,
if loc_itr < len(loc_Indices) and itr == loc_Indices[loc_itr]:
loc_itr += 1 # but this limits us to this processor's local indices
else:
continue
#print("DB: Rank %d: running itr=%d" % (comm.Get_rank(), itr))
printer.show_progress(loc_itr, nLocIters, prefix='--- Finding amped-polys for idle: ')
prepFid = _objs.Circuit(())
for i, el in enumerate(prep):
prepFid = prepFid + _onqubit(el, qubit_filter[i])
for meas in _itertools.product(*([singleQfiducials] * nQubits)):
if idtPauliDicts is not None:
# For idle tomography compatibility, only consider fiducial pairs with either
# all-the-same or all-different prep & measure basis (basis is determined
# by the *last* letter in the value, e.g. ignore '-' sign in '-X').
prepDict, measDict = idtPauliDicts
rev_prepDict = {v[-1]: k for k, v in prepDict.items()} # could do this once above,
rev_measDict = {v[-1]: k for k, v in measDict.items()} # but this isn't the bottleneck.
cmp = [(rev_prepDict[prep[kk]] == rev_measDict[meas[kk]]) for kk in range(nQubits)]
# if all are not the same or all are not different, skip
if not (all(cmp) or not any(cmp)): continue
measFid = _objs.Circuit(())
for i, el in enumerate(meas):
measFid = measFid + _onqubit(el, qubit_filter[i])
gatename_fidpair_list = [(prep[i], meas[i]) for i in range(nQubits)]
if gatename_fidpair_list in selected_gatename_fidpair_lists:
continue # we've already chosen this pair in a previous iteration
gstr_L0 = prepFid + measFid # should be a Circuit
gstr_L1 = prepFid + idleStr + measFid # should be a Circuit
ps = model._fwdsim().prs_as_polys(prepLbl, effectLbls, gstr_L1)
qs = model._fwdsim().prs_as_polys(prepLbl, effectLbls, gstr_L0)
if algorithm == "sequential":
added = False
for elbl, p, q in zip(effectLbls, ps, qs):
amped = p + -1 * q # the amplified poly
Jrow = _np.array([[amped.deriv(iParam).evaluate(dummy)
for iParam in _slct.as_array(wrtParams)]])
if _np.linalg.norm(Jrow) < 1e-8: continue # row of zeros can fool matrix_rank
Jtest = _np.concatenate((J, Jrow), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
if testRank > Jrank:
printer.log("fidpair: %s,%s (%s) increases rank => %d" %
(str(prep), str(meas), str(elbl), testRank), 4)
J = Jtest
Jrank = testRank
if not added:
selected_gatename_fidpair_lists.append(gatename_fidpair_list)
added = True # only add fidpair once per elabel loop!
if Jrank == Np: break # this is the largest rank J can take!
elif algorithm == "greedy":
#test adding all effect labels - get the overall increase in rank due to this fidpair
for k, (elbl, p, q) in enumerate(zip(effectLbls, ps, qs)):
amped = p + -1 * q # the amplified poly
Jrows[k, :] = _np.array([[amped.deriv(iParam).evaluate(dummy)
for iParam in _slct.as_array(wrtParams)]])
Jtest = _np.concatenate((J, Jrows), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
rankInc = testRank - Jrank
if rankInc > maxRankInc:
maxRankInc = rankInc
bestJrows = Jrows.copy()
bestFidpair = gatename_fidpair_list
if testRank == Np: break # this is the largest rank we can get!
if algorithm == "greedy":
# get the best of the bestJrows, bestFidpair, and maxRankInc
if comm is not None:
maxRankIncs_per_rank = comm.allgather(maxRankInc)
iWinningRank = maxRankIncs_per_rank.index(max(maxRankIncs_per_rank))
maxRankInc = maxRankIncs_per_rank[iWinningRank]
if comm.Get_rank() == iWinningRank:
comm.bcast(bestJrows, root=iWinningRank)
comm.bcast(bestFidpair, root=iWinningRank)
else:
bestJrows = comm.bcast(None, root=iWinningRank)
bestFidpair = comm.bcast(None, root=iWinningRank)
if require_all_amped:
assert(maxRankInc > 0), "No fiducial pair increased the Jacobian rank!"
Jrank += maxRankInc
J = _np.concatenate((J, bestJrows), axis=0)
selected_gatename_fidpair_lists.append(bestFidpair)
printer.log("%d fidpairs => rank %d (Np=%d)" %
(len(selected_gatename_fidpair_lists), Jrank, Np))
#DEBUG
#print("DB: J = ")
#_gt.print_mx(J)
#print("DB: svals of J for synthetic idle: ", _np.linalg.svd(J, compute_uv=False))
return J, Jrank, selected_gatename_fidpair_lists | 47d7f684047a5b0379febe2fd64e964f805a2764 | 12,370 |
def _seed(x, deg=5, seeds=None):
"""Seed the greedy algorithm with (deg+1) evenly spaced indices"""
if seeds is None:
f = lambda m, n: [ii*n//m + n//(2*m) for ii in range(m)]
indices = np.sort(np.hstack([[0, len(x)-1], f(deg-1, len(x))]))
else:
indices = seeds
errors = []
return indices, errors | 7a5ff1e2e27b812f17196fbec1d7c6a2c867207c | 12,371 |
def get_ref(cube):
"""Gets the 8 reflection symmetries of a nd numpy array"""
L = []
L.append(cube[:,:,:])
L.append(cube[:,:,::-1])
L.append(cube[:,::-1,:])
L.append(cube[::-1,:,:])
L.append(cube[:,::-1,::-1])
L.append(cube[::-1,:,::-1])
L.append(cube[::-1,::-1,:])
L.append(cube[::-1,::-1,::-1])
return L | 683ef2c7c0a312e4cf891f191452f9c29f6bc1fd | 12,372 |
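
# Illustrative usage sketch for get_ref on a small 2x2x2 array:
import numpy as np

cube = np.arange(8).reshape(2, 2, 2)
reflections = get_ref(cube)
print(len(reflections))         # 8
print(reflections[7][0, 0, 0])  # 7 -> the fully reversed copy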
from typing import Collection
from typing import Tuple
from typing import Optional
from typing import Mapping
def get_relation_functionality(
mapped_triples: Collection[Tuple[int, int, int]],
add_labels: bool = True,
label_to_id: Optional[Mapping[str, int]] = None,
) -> pd.DataFrame:
"""Calculate relation functionalities.
:param mapped_triples:
The ID-based triples.
:return:
A dataframe with columns ( functionality | inverse_functionality )
"""
df = pd.DataFrame(data=mapped_triples, columns=["h", "r", "t"])
df = df.groupby(by="r").agg(dict(
h=["nunique", COUNT_COLUMN_NAME],
t="nunique",
))
df[FUNCTIONALITY_COLUMN_NAME] = df[("h", "nunique")] / df[("h", COUNT_COLUMN_NAME)]
df[INVERSE_FUNCTIONALITY_COLUMN_NAME] = df[("t", "nunique")] / df[("h", COUNT_COLUMN_NAME)]
df = df[[FUNCTIONALITY_COLUMN_NAME, INVERSE_FUNCTIONALITY_COLUMN_NAME]]
df.columns = df.columns.droplevel(1)
df.index.name = RELATION_ID_COLUMN_NAME
df = df.reset_index()
return add_relation_labels(df, add_labels=add_labels, label_to_id=label_to_id) | 1e6aa6d9e61ebd788d8c1726ca8a75d551b654b8 | 12,373 |
import json
import altair
def df_to_vega_lite(df, path=None):
"""
Export a pandas.DataFrame to a vega-lite data JSON.
Params
------
df : pandas.DataFrame
dataframe to convert to JSON
path : None or str
if None, return the JSON str. Else write JSON to the file specified by
path.
"""
chart = altair.Chart(data=df)
data = chart.to_dict()['data']['values']
if path is None:
return json.dumps(data, **json_dump_kwargs)
with open(path, 'w') as write_file:
json.dump(data, write_file, **json_dump_kwargs) | 5cf5cf834d4113c05c4cc8b99aaa2a94e0a7b746 | 12,374 |
def _is_json_mimetype(mimetype):
"""Returns 'True' if a given mimetype implies JSON data."""
return any(
[
mimetype == "application/json",
mimetype.startswith("application/") and mimetype.endswith("+json"),
]
) | 9c2580ff4a783d9f79d6f6cac41befb516c52e9f | 12,375 |
from datetime import datetime
def make_request(action, data, token):
"""Make request based on passed arguments and timestamp."""
return {
'action': action,
'time': datetime.now().timestamp(),
'data': data,
'token': token
} | 60e511f7b067595bd698421adaafe37bbf8e59e1 | 12,376 |
def get_stats_historical_prices(timestamp, horizon):
"""
We assume here that the price is a random variable following a normal
distribution. We compute the mean and covariance of the price distribution.
"""
hist_prices_df = pd.read_csv(HISTORICAL_PRICES_CSV)
hist_prices_df["timestamp"] = pd.to_datetime(hist_prices_df["timestamp"])
hist_prices_df = hist_prices_df.set_index("timestamp")
start = pd.Timestamp(year=2018,
month=6,
day=2,
hour=timestamp.hour,
minute=timestamp.minute)
end = pd.Timestamp(year=2018,
month=10,
day=25,
hour=timestamp.hour,
minute=timestamp.minute)
hist_prices_df = hist_prices_df[
(hist_prices_df.index >= start) &
(hist_prices_df.index < end)
]
hist_prices_df['hour'] = hist_prices_df.index.hour
hist_prices_df['minute'] = hist_prices_df.index.minute
num_features = horizon
num_samples = min(hist_prices_df.groupby(
[hist_prices_df.index.hour, hist_prices_df.index.minute]
).count()['clearing_price'].values)
new = hist_prices_df.groupby(
[hist_prices_df.index.hour, hist_prices_df.index.minute]
).mean()
new = new.set_index(pd.Index(range(48)))
i = new[
(new.hour == timestamp.hour) & (new.minute == timestamp.minute)
]['clearing_price'].index.values[0]
a = new[new.index >= i]['clearing_price']
b = new[new.index < i]['clearing_price']
mean_X = np.concatenate((a, b))
X = np.copy(hist_prices_df['clearing_price'].values)
X = np.reshape(X, (num_samples, num_features))
cov = GaussianMixture(covariance_type='tied').fit(
normalize(X)).covariances_
return mean_X, cov | bc6fdcbcb54f156d880ba2504a0ca0d50f889786 | 12,377 |
def _unflattify(values, shape):
"""
Unflattifies parameter values.
:param values: The flattened array of values that are to be unflattified
:type values: torch.Tensor
:param shape: The shape of the parameter prior
:type shape: torch.Size
:rtype: torch.Tensor
"""
if len(shape) < 1 or values.shape[1:] == shape:
return values
return values.reshape(values.shape[0], *shape) | e885517419eb48fd1a4ebdf14a8fa3b19f3c5444 | 12,378 |
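
# Illustrative usage sketch for _unflattify: reshape a batch of flattened
# parameter samples back to the prior's shape.
import torch

flat_samples = torch.arange(12.0).reshape(3, 4)              # 3 samples of 4 values
print(_unflattify(flat_samples, torch.Size([2, 2])).shape)   # torch.Size([3, 2, 2])
print(_unflattify(flat_samples, torch.Size([])).shape)       # unchanged: torch.Size([3, 4])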
def theme_cmd(data, buffer, args):
"""Callback for /theme command."""
if args == '':
weechat.command('', '/help ' + SCRIPT_COMMAND)
return weechat.WEECHAT_RC_OK
argv = args.strip().split(' ', 1)
if len(argv) == 0:
return weechat.WEECHAT_RC_OK
if argv[0] in ('install',):
weechat.prnt('',
'{0}: action "{1}" not developed'
''.format(SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# check arguments
if len(argv) < 2:
if argv[0] in ('install', 'installfile', 'save', 'export'):
weechat.prnt('',
'{0}: too few arguments for action "{1}"'
''.format(SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# execute asked action
if argv[0] == 'list':
theme_list(argv[1] if len(argv) >= 2 else '')
elif argv[0] == 'info':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.info('Info about theme "{0}":'.format(filename))
else:
theme.info('Info about current theme:')
elif argv[0] == 'show':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.show('Content of theme "{0}":'.format(filename))
else:
theme.show('Content of current theme:')
elif argv[0] == 'installfile':
theme = Theme()
theme.save(theme_config_get_undo())
theme = Theme(argv[1])
if theme.isok():
theme.install()
elif argv[0] == 'update':
theme_update()
elif argv[0] == 'undo':
theme = Theme(theme_config_get_undo())
if theme.isok():
theme.install()
elif argv[0] == 'save':
theme = Theme()
theme.save(argv[1])
elif argv[0] == 'backup':
theme = Theme()
theme.save(theme_config_get_backup())
elif argv[0] == 'restore':
theme = Theme(theme_config_get_backup())
if theme.isok():
theme.install()
elif argv[0] == 'export':
htheme = HtmlTheme()
whitebg = False
htmlfile = argv[1]
argv2 = args.strip().split(' ', 2)
if len(argv2) >= 3 and argv2[1] == 'white':
whitebg = True
htmlfile = argv2[2]
htheme.save_html(htmlfile, whitebg)
return weechat.WEECHAT_RC_OK | f361a56392320efac4bd1e4101b002c1e42d4b89 | 12,379 |
def get_unique_chemical_names(reagents):
"""Get the unique chemical species names in a list of reagents.
The concentrations of these species define the vector space in which we sample possible experiments
:param reagents: a list of perovskitereagent objects
:return: a list of the unique chemical names in all of the reagent
"""
chemical_species = set()
if isinstance(reagents, dict):
reagents = [v for v in reagents.values()]
for reagent in reagents:
chemical_species.update(reagent.chemicals)
return sorted(list(chemical_species)) | ae5d6b3bdd8e03c47b9c19c900760c8c2b83d0a0 | 12,380 |
def get_sorted_keys(dict_to_sort):
"""Gets the keys from a dict and sorts them in ascending order.
Assumes keys are of the form Ni, where N is a letter and i is an integer.
Args:
dict_to_sort (dict): dict whose keys need sorting
Returns:
list: list of sorted keys from dict_to_sort
"""
sorted_keys = list(dict_to_sort.keys())
sorted_keys.sort(key=lambda x: int(x[1:]))
return sorted_keys | 9614dee83723e21248381c61a60e92e78c121216 | 12,381 |
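
# Illustrative usage sketch for get_sorted_keys: a plain lexicographic sort
# would put "p10" before "p2"; the numeric sort does not.
print(get_sorted_keys({"p10": 1, "p2": 2, "p1": 3}))  # ['p1', 'p2', 'p10']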
def model_3d(psrs, psd='powerlaw', noisedict=None, components=30,
gamma_common=None, upper_limit=False, bayesephem=False,
wideband=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 3D from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
['powerlaw', 'turnover' 'spectrum']
2. Monopole signal modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
['powerlaw', 'turnover' 'spectrum']
3. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
are ['powerlaw', 'turnover' 'spectrum'] 'powerlaw' is default
value.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# red noise
s = red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='hd', name='gw')
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='monopole', name='monopole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# timing model
s += gp_signals.TimingModel()
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not wideband:
s2 = s + white_noise_block(vary=False, inc_ecorr=True)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=False, inc_ecorr=False)
models.append(s3(p))
# set up PTA
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta | 37abad1016fadd82bcff1a55e9835db28a5c4eb8 | 12,382 |
def max_votes(x):
"""
    Return the predicted class with the most votes.
Notes
-----
If number of class 0 prediction is equal to number of class 1 predictions, NO_VOTE will be returned.
E.g.
Num_preds_0 = 25,
Num_preds_1 = 25,
Num_preds_NO_VOTE = 0,
returned vote : "NO_VOTE".
"""
if x['Num_preds_0'] > x['Num_preds_1'] and x['Num_preds_0'] > x['Num_preds_NO_VOTE']:
return 0
elif x['Num_preds_1'] > x['Num_preds_0'] and x['Num_preds_1'] > x['Num_preds_NO_VOTE']:
return 1
else:
return 'NO_VOTE' | 2eadafdaf9e9b4584cd81685a5c1b77a090e4f1c | 12,383 |
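
# Illustrative usage sketch for max_votes; x can be any mapping with the three
# count fields (e.g. a pandas Series or a plain dict):
print(max_votes({'Num_preds_0': 25, 'Num_preds_1': 30, 'Num_preds_NO_VOTE': 5}))  # 1
print(max_votes({'Num_preds_0': 25, 'Num_preds_1': 25, 'Num_preds_NO_VOTE': 0}))  # NO_VOTE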
def misclassification_error(y_true: np.ndarray, y_pred: np.ndarray, normalize: bool = True) -> float:
"""
Calculate misclassification loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
normalize: bool, default = True
Normalize by number of samples or not
Returns
-------
Misclassification of given predictions
"""
n = y_true.shape[-1]
counter = np.ones_like(y_true)
error = counter[y_true!=y_pred].sum(axis=-1)
return error / n if normalize else error | 676657fa4da7b4734077ba3a19878d8890f44815 | 12,384 |
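
# Illustrative usage sketch for misclassification_error:
import numpy as np

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 0])
print(misclassification_error(y_true, y_pred))                   # 0.4 (2 of 5 wrong)
print(misclassification_error(y_true, y_pred, normalize=False))  # 2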
from scipy.stats import uniform
def dunif(x, minimum=0,maximum=1):
"""
    Calculates the probability density of the uniform distribution at x
"""
result=uniform.pdf(x=x,loc=minimum,scale=maximum-minimum)
return result | 980ffb875cefec13bb78c3a3c779c68e7f510fb7 | 12,385 |
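
# Illustrative usage sketch for dunif: the density of Uniform(0, 4) is 0.25
# inside the support and 0 outside it.
print(dunif(1.0, minimum=0, maximum=4))  # 0.25
print(dunif(5.0, minimum=0, maximum=4))  # 0.0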
def _generate_upsert_sql(mon_loc):
"""
Generate SQL to insert/update.
"""
mon_loc_db = [(k, _manipulate_values(v, k in TIME_COLUMNS)) for k, v in mon_loc.items()]
all_columns = ','.join(col for (col, _) in mon_loc_db)
all_values = ','.join(value for (_, value) in mon_loc_db)
update_query = ','.join(f"{k}={v}" for (k, v) in mon_loc_db if k not in ['AGENCY_CD', 'SITE_NO'])
statement = (
f"MERGE INTO GW_DATA_PORTAL.WELL_REGISTRY_STG a "
f"USING (SELECT '{mon_loc['AGENCY_CD']}' AGENCY_CD, '{mon_loc['SITE_NO']}' "
f"SITE_NO FROM DUAL) b ON (a.AGENCY_CD = b.AGENCY_CD AND a.SITE_NO = b.SITE_NO) "
f"WHEN MATCHED THEN UPDATE SET {update_query} WHEN NOT MATCHED THEN INSERT ({all_columns}) VALUES ({all_values})"
)
return statement | 7cbfdc1dd8709a354e4e246324042c8cf02a703b | 12,386 |
import functools
def filtered_qs(func):
"""
    #TODO: add handling for functions that take arguments
:param func:
:return:
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
ret_qs = func(self)
return ret_qs.filter(*args, **kwargs)
return wrapped | 5d3330c44fc9e7f9bffc74d27a3ebb69bc35944e | 12,387 |
def dict2obj(d):
"""Given a dictionary, return an object with the keys mapped to attributes
and the values mapped to attribute values. This is recursive, so nested
dictionaries are nested objects."""
top = type('dict2obj', (object,), d)
seqs = tuple, list, set, frozenset
for k, v in d.items():
if isinstance(v, dict):
setattr(
top,
k, dict2obj(v)
)
elif isinstance(v, seqs):
setattr(
top,
k, type(v)(dict2obj(sj) if isinstance(sj, dict) else sj for sj in v)
)
else:
setattr(top, k, v)
return top | ccfa713dc130024427872eb6f2017a0383e3bc01 | 12,388 |
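
# Illustrative usage sketch for dict2obj with a nested dictionary:
config = dict2obj({"name": "demo", "db": {"host": "localhost", "port": 5432}, "tags": ["a", "b"]})
print(config.name)     # demo
print(config.db.host)  # localhost
print(config.tags)     # ['a', 'b']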
def customized_algorithm_plot(experiment_name='finite_simple_sanity', data_path=_DEFAULT_DATA_PATH):
"""Simple plot of average instantaneous regret by agent, per timestep.
Args:
experiment_name: string = name of experiment config.
data_path: string = where to look for the files.
Returns:
p: ggplot plot
"""
df = load_data(experiment_name, data_path)
plt_df = (df.groupby(['t', 'agent'])
.agg({'instant_regret': np.mean})
.reset_index())
plt_df['agent_new_name'] = plt_df.agent.apply(rename_agent)
custom_labels = ['Laplace TS','Langevin TS','TS','bootstrap TS']
custom_colors = ["#E41A1C","#377EB8","#4DAF4A","#984EA3"]
p = (gg.ggplot(plt_df)
+ gg.aes('t', 'instant_regret', colour='agent_new_name')
+ gg.geom_line(size=1.25, alpha=0.75)
+ gg.xlab('time period (t)')
+ gg.ylab('per-period regret')
+ gg.scale_color_manual(name='agent', labels = custom_labels,values=custom_colors))
return p | bd046c14de1598672391bbcb134dfe8bcff0b558 | 12,389 |
def _get_log_time_scale(units):
"""Retrieves the ``log10()`` of the scale factor for a given time unit.
Args:
units (str): String specifying the units
(one of ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
Returns:
The ``log10()`` of the scale factor for the time unit.
"""
scale = {"fs": -15, "ps": -12, "ns": -9, "us": -6, "ms": -3, "sec": 0}
units_lwr = units.lower()
if units_lwr not in scale:
raise ValueError(f"Invalid unit ({units}) provided")
else:
return scale[units_lwr] | 2371aab923aacce9159bce6ea1470ed49ef2c72f | 12,390 |
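
# Illustrative usage sketch for _get_log_time_scale: converting 10 us to ns via
# the difference of log-scales.
shift = _get_log_time_scale("us") - _get_log_time_scale("ns")  # -6 - (-9) = 3
print(10 * 10 ** shift)  # 10000, i.e. 10 us == 10000 ns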
def resolvermatch(request):
"""Add the name of the currently resolved pattern to the RequestContext"""
match = resolve(request.path)
if match:
return {'resolved': match}
else:
return {} | 41cc88633e0b207a53318c761c9849ad2d079994 | 12,391 |
def selection_sort(arr: list) -> list:
"""
    Main sorting function. Uses the "find_smallest" helper function as part
    of the algorithm.
:param arr: list to sort
:return: sorted list
"""
new_arr = []
for index in range(len(arr)):
smallest = find_smallest(arr)
new_arr.append(arr.pop(smallest))
return new_arr | e618c5469ce77d830255dc16806f9499bed7ca9a | 12,392 |
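
# selection_sort above depends on a find_smallest helper that is not included in
# this snippet; a minimal sketch of it (assumed to return the index of the
# smallest element), followed by a usage example:
def find_smallest(arr: list) -> int:
    """Return the index of the smallest element in arr."""
    smallest_index = 0
    for i in range(1, len(arr)):
        if arr[i] < arr[smallest_index]:
            smallest_index = i
    return smallest_index

print(selection_sort([5, 3, 6, 2, 10]))  # [2, 3, 5, 6, 10]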
def get_primary_monitor():
"""
Returns the primary monitor.
Wrapper for:
GLFWmonitor* glfwGetPrimaryMonitor(void);
"""
return _glfw.glfwGetPrimaryMonitor() | 0bcc55f64c1b8ce6bad31323e5a4bb6ff05eab47 | 12,393 |
def query_people_and_institutions(rc, names):
"""Get the people and institutions names."""
people, institutions = [], []
for person_name in names:
person_found = fuzzy_retrieval(all_docs_from_collection(
rc.client, "people"),
["name", "aka", "_id"],
person_name, case_sensitive=False)
if not person_found:
person_found = fuzzy_retrieval(all_docs_from_collection(
rc.client, "contacts"),
["name", "aka", "_id"], person_name, case_sensitive=False)
if not person_found:
print(
"WARNING: {} not found in contacts or people. Check aka".format(
person_name))
else:
people.append(person_found['name'])
inst = fuzzy_retrieval(all_docs_from_collection(
rc.client, "institutions"),
["name", "aka", "_id"],
person_found["institution"], case_sensitive=False)
if inst:
institutions.append(inst["name"])
else:
institutions.append(person_found.get("institution", "missing"))
print("WARNING: {} missing from institutions".format(
person_found["institution"]))
else:
people.append(person_found['name'])
pinst = get_recent_org(person_found)
inst = fuzzy_retrieval(all_docs_from_collection(
rc.client, "institutions"), ["name", "aka", "_id"],
pinst, case_sensitive=False)
if inst:
institutions.append(inst["name"])
else:
institutions.append(pinst)
print(
"WARNING: {} missing from institutions".format(
pinst))
return people, institutions | fd98a7557e2ee07b67ca8eddaf76c28b7b99033a | 12,394 |
from typing import Union
from typing import Tuple
def add_device(overlay_id) -> Union[str, Tuple[str, int]]:
"""
Add device to an overlay.
"""
manager = get_manager()
api_key = header_api_key(request)
if not manager.api_key_is_valid(api_key):
return jsonify(error="Not authorized"), 403
if not request.data:
return jsonify(error="Send device id to add to overlay in body"), 400
if "device_id" in request.json:
return manager.add_device_to_overlay(overlay_id,request.get_json()['device_id'])
return jsonify(error="Send device_id as JSON"), 400 | b9652b8d99672d0219df4821decebded458719bd | 12,395 |
from math import sin, cos
def pvtol(t, x, u, params={}):
"""Reduced planar vertical takeoff and landing dynamics"""
m = params.get('m', 4.) # kg, system mass
J = params.get('J', 0.0475) # kg m^2, system inertia
r = params.get('r', 0.25) # m, thrust offset
g = params.get('g', 9.8) # m/s, gravitational constant
c = params.get('c', 0.05) # N s/m, rotational damping
    l = params.get('l', 0.1)           # m, pivot location
return np.array([
x[3],
-c/m * x[1] + 1/m * cos(x[0]) * u[0] - 1/m * sin(x[0]) * u[1],
-g - c/m * x[2] + 1/m * sin(x[0]) * u[0] + 1/m * cos(x[0]) * u[1],
-l/J * sin(x[0]) + r/J * u[0]
]) | ff3357e6e1fc1b6f878d9f16b14eba0b687642cd | 12,396 |
from typing import List
from typing import Any
from typing import Callable
def route(
path: str, methods: List[str], **kwargs: Any
) -> Callable[[AnyCallable], AnyCallable]:
"""General purpose route definition. Requires you to pass an array of HTTP methods like GET, POST, PUT, etc.
The remaining kwargs are exactly the same as for FastAPI's decorators like @get, @post, etc.
Most users will probably want to use the shorter decorators like @get, @post, @put, etc. so they don't have to pass
the list of methods.
"""
def marker(method: AnyCallable) -> AnyCallable:
setattr(
method,
"_endpoint",
EndpointDefinition(
endpoint=method, args=RouteArgs(path=path, methods=methods, **kwargs)
),
)
return method
return marker | 9e499d59b48a3562f46bdcbde76d87ceb199691e | 12,397 |
import wx
def canHaveGui():
"""Return ``True`` if a display is available, ``False`` otherwise. """
# We cache this because calling the
# IsDisplayAvailable function will cause the
# application to steal focus under OSX!
try:
return wx.App.IsDisplayAvailable()
except ImportError:
return False | 9a9af0f46ca22faeb5f76e350d1c831bcba95343 | 12,398 |
def syntactic_analysis(input_fd):
"""
    Performs lexical and syntactic analysis of a Tiger program.
    @type input_fd: C{file}
    @param input_fd: File descriptor of the Tiger program on which the
        syntactic analysis should be performed.
    @rtype: C{LanguageNode}
    @return: The result of the syntactic analysis is the abstract syntax tree
        corresponding to the Tiger program received as argument. The tree is
        returned through its root node.
    @raise SyntacticError: This exception is raised if a syntax error is found
        during the analysis of the program. The exception carries information
        about the error, such as the line and/or column where the error was
        found.
"""
data = input_fd.read()
ast = parser.parse(data)
return ast | 0d0481c8ac84ac1de1ff3f756f20f33bdc8a18e0 | 12,399 |