content (string, 35 – 762k chars) | sha1 (string, 40 chars) | id (int64, 0 – 3.66M)
---|---|---|
import numpy as np
def demosaic(cfa, pattern='RGGB'):
"""
Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using
bilinear interpolation.
Parameters
----------
    cfa : array_like
*Bayer* color filter array (CFA).
pattern : unicode, optional
**{'RGGB', 'BGGR', 'GRBG', 'GBRG'}**,
Arrangement of the colour filters on the pixel array.
Returns
-------
ndarray
*RGB* colourspace array.
Notes
-----
- The definition output is not clipped in range [0, 1] : this allows for
direct HDRI / radiance image generation on *Bayer* CFA data and post
demosaicing of the high dynamic range data as showcased in this
`Jupyter Notebook <https://github.com/colour-science/colour-hdri/\
blob/develop/colour_hdri/examples/\
examples_merge_from_raw_files_with_post_demosaicing.ipynb>`_.
References
----------
- :cite:`Losson2010c`
Examples
--------
>>> CFA = np.array(
... [[0.30980393, 0.36078432, 0.30588236, 0.3764706],
... [0.35686275, 0.39607844, 0.36078432, 0.40000001]])
>>> demosaic(CFA)
array([[[ 0.69705884, 0.17941177, 0.09901961],
[ 0.46176472, 0.4509804 , 0.19803922],
[ 0.45882354, 0.27450981, 0.19901961],
[ 0.22941177, 0.5647059 , 0.30000001]],
<BLANKLINE>
[[ 0.23235295, 0.53529412, 0.29705883],
[ 0.15392157, 0.26960785, 0.59411766],
[ 0.15294118, 0.4509804 , 0.59705884],
[ 0.07647059, 0.18431373, 0.90000002]]])
>>> CFA = np.array(
... [[0.3764706, 0.360784320, 0.40784314, 0.3764706],
... [0.35686275, 0.30980393, 0.36078432, 0.29803923]])
>>> demosaic(CFA, 'BGGR')
array([[[ 0.07745098, 0.17941177, 0.84705885],
[ 0.15490197, 0.4509804 , 0.5882353 ],
[ 0.15196079, 0.27450981, 0.61176471],
[ 0.22352942, 0.5647059 , 0.30588235]],
<BLANKLINE>
[[ 0.23235295, 0.53529412, 0.28235295],
[ 0.4647059 , 0.26960785, 0.19607843],
[ 0.45588237, 0.4509804 , 0.20392157],
[ 0.67058827, 0.18431373, 0.10196078]]])
"""
cfa = np.asarray(cfa)
R_m, G_m, B_m = masks(cfa.shape, pattern)
H_G = np.asarray([[0, 1, 0],
[1, 4, 1],
[0, 1, 0]]) / 4
H_RB = np.asarray([[1, 2, 1],
[2, 4, 2],
[1, 2, 1]]) / 4
R = convolve(cfa * R_m, H_RB)
G = convolve(cfa * G_m, H_G)
B = convolve(cfa * B_m, H_RB)
return np.concatenate(
[R[..., np.newaxis], G[..., np.newaxis], B[..., np.newaxis]],
axis=-1
) | af50a6a8f19cbcbf60cc9e590fa12c12df65e0ca | 10,497 |
import random
def compare_skill(embedding, idx=None):
"""Display a skill its most similar skills in the embedding.
Args:
embedding (array): skills embedding
idx (int): index to select skill,
defaults to None (if None, a random index is chosen)
Returns:
df: dataframe of a skill and the skills it is closest
to in the embedding by cosine similarity
"""
if idx is None:
        description = embedding[random.randint(0, len(embedding) - 1)]
else:
description = embedding[idx]
return (
skills[["preferredLabel", "description"]]
.assign(cosine_scores=util.pytorch_cos_sim(description, embedding)[0])
.sort_values(by=["cosine_scores"], ascending=False)
.head(10)
) | f885e744a3f32ba297a2429e7c69a2d7c37670da | 10,499 |
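# A hedged usage sketch for compare_skill. It assumes the module-level `skills`
# DataFrame (with 'preferredLabel' and 'description' columns) and `util` from
# sentence-transformers are already available, and that `embedding` was built by
# encoding the skill descriptions; the model name below is only an illustrative choice.
from sentence_transformers import SentenceTransformer, util  # assumed dependency

model = SentenceTransformer("all-MiniLM-L6-v2")  # hypothetical model choice
embedding = model.encode(skills["description"].tolist(), convert_to_tensor=True)
compare_skill(embedding, idx=42)  # top-10 skills closest to skill 42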
def translate_df(df: DataFrame) -> DataFrame:
"""
Función para traducir directamente un DataFrame
:param df: DataFrame a traducir
:return: DataFrame
"""
regs = df.Country.count() #Contamos la cantidad de registros en la columna 'Country' para servir como delimitador del for
# Usamos un for para traducir uno a uno los países, el parámetro lang_tgt nos indica el idioma al que queremos llegar y lang_src el de origen
for i in range(0, regs):
df.Country[i] = translator.translate(df.Country[i], lang_tgt='es', lang_src='en')
return df | 4cbe6281ca8b9900243a9928c26462bd35c38bdf | 10,500 |
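# A hedged usage sketch for translate_df. It assumes `translator` is a
# google_trans_new.google_translator() instance, whose translate() call takes the
# lang_tgt/lang_src keywords used above; any other client with the same signature works.
import pandas as pd
from google_trans_new import google_translator  # assumed translation backend

translator = google_translator()
df = pd.DataFrame({"Country": ["Germany", "France", "Spain"]})
translated = translate_df(df)
print(translated.Country.tolist())  # e.g. ['Alemania', 'Francia', 'España']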
import jieba
def _process_caption_jieba(caption):
    """Processes a Chinese caption string into a list of tokenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption.
"""
tokenized_caption = []
tokenized_caption.extend(jieba.cut(caption, cut_all=False))
return tokenized_caption | d57ec779ff211217322c5a5c8399fc607c2d2919 | 10,502 |
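# A minimal usage sketch: jieba.cut segments a Chinese sentence into words and
# _process_caption_jieba simply collects them into a list. The caption below is
# only an illustrative input.
caption = "我今天去了北京动物园"
tokens = _process_caption_jieba(caption)
print(tokens)  # a list of segmented word strings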
import requests
def create_connection(graph, node1, node2, linktype, propertydict=None, allow_dup=False):
"""
    :param graph: graph to add the connection to
    :param node1: first node of the connection
    :param node2: second node of the connection
    :param linktype: type of the link to create
    :param propertydict: optional dict of properties to attach to the link
    :param allow_dup: whether duplicate connections are allowed
    :return: JSON response of the API call, or None if the request failed
"""
data = {}
data["graph"] = graph
data["node1"] = node1
data["node2"] = node2
data["linktype"] = linktype
data["propertydict"] = propertydict
data["allow_dup"] = allow_dup
resp = requests.post(config.APIURL + config.APIBaseURL + "create_connection", json=data)
if resp.status_code == 200:
ret = resp.json()
else:
ret = None
print("create connection")
return ret | fcbb830d023005fdf008ecbcd73bcd6470545b81 | 10,503 |
import json
# Input and Model are assumed to come from Keras (not shown in the original snippet);
# Struct is assumed to be a small dict-to-attributes helper defined elsewhere.
from keras.layers import Input
from keras.models import Model
def get_nn(config):
    """
    Args:
        config: Path to the config file generated during training
    Returns:
        Model instance
    """
    # Loads the model configuration from the config file
    # generated during training
    with open(config, 'r') as f:
        C = json.load(f)
    C = Struct(**C)
    # Import the network architecture selected in the config
    if C.network == 'resnet50':
        from src.architectures import resnet50 as nn
    elif C.network == 'resnet152':
        from src.architectures import resnet152 as nn
    elif C.network == 'vgg16':
        from src.architectures import vgg16 as nn
    else:
        from src.architectures import vgg19 as nn
# Create our model, load weights and then
# compile it
input_shape_img = (224, 224, 3)
img_input = Input(shape=input_shape_img)
base_layers = nn.nn_base(img_input)
classifier = nn.classifier(base_layers, trainable=False)
return Model(inputs=img_input, outputs=classifier) | 99ba4a9473b603800b318c49f0b8bbc02d504778 | 10,504 |
from typing import Any
from typing import Union
def get_functor(value: Any) -> Union[Functor, FunctorIter, FunctorDict]:
"""
Returns a base functor instance with a value property set to 'value'
of the class for either dictionary, other iterable or uniterable type,
and, where passed, a const property set to the constructor of 'value'.
>>> f = get_functor([1, 2, 3])
>>> print(f.__class__.__name__, f.value, f.const == list)
FunctorIter [1, 2, 3] True
"""
const = get_constructor(value)
if const in iterables_passed:
return FunctorIter(value, const)
if const == dict:
return FunctorDict(value)
return Functor(value) | 475ed04f052daa4d301f29c511f8d8ad4ce2b0f3 | 10,505 |
def split_bibtexs_by_bib_style(bibtexs):
"""
Args:
bibtexs (list of Queryset of Bibtex):
Returns:
list of tuple: (Style Key, Display Name, Bibtex List)
"""
# Get STYLE KYES
bibtex_backet = dict()
choices = expand_book_style_tuple(Book.STYLE_CHOICES) + list(
Bibtex.BIBSTYLE_CHOICES
)
    # Drop the "SAME_AS_BOOK" placeholder (popping inside the enumerate loop would
    # skip entries) and create an empty bucket for every remaining style key.
    choices = [(key, name) for key, name in choices if key != "SAME_AS_BOOK"]
    for key, _ in choices:
        bibtex_backet[key] = []
# Split by Style
for bib in bibtexs:
bibtex_backet[bib.bib_type_key].append(bib)
# Make list of tuple
ret = []
for key, display_name in choices:
if len(bibtex_backet[key]) > 0:
ret.append((key, display_name, bibtex_backet[key]))
return ret | 00736e347b8b2601a418ba5f19d872a27e2ed13c | 10,506 |
def apply_gates(date, plate, gates_df, subpopulations=False, correlation=None):
""" Constructs dataframe with channels relevant to receptor quantification. """
if date == "5-16":
receptors = ['CD127']
channels = ['BL1-H']
else:
receptors = ['CD25', 'CD122', 'CD132']
channels = ["VL1-H", "BL5-H", "RL1-H"]
for i, r in enumerate(receptors):
cellTypes = ['T-helper', 'T-reg', 'NK', 'CD8+']
for j, cellType in enumerate(cellTypes):
if i == 0 and j == 0:
df, unstainedWell, isotypes = samp_Gate(date, plate, gates_df, cellType, r, correlation, subPop=subpopulations)
df = subtract_unstained_signal(df, channels, receptors, unstainedWell, isotypes)
else:
df2, unstainedWell2, isotypes2 = samp_Gate(date, plate, gates_df, cellType, r, correlation, subPop=subpopulations)
df2 = subtract_unstained_signal(df2, channels, receptors, unstainedWell2, isotypes2)
df = df.append(df2)
return df | 725df1a46a90f28da56e329a492e1534d81737ae | 10,507 |
import numpy as np
from typing import Callable
from typing import Optional
def action_interaction_exponential_reward_function(
context: np.ndarray,
action_context: np.ndarray,
action: np.ndarray,
base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray],
action_interaction_weight_matrix: np.ndarray,
reward_type: str,
random_state: Optional[int] = None,
**kwargs,
) -> np.ndarray:
"""Reward function incorporating exponential interactions among combinatorial action
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors characterizing each round (such as user information).
action_context: array-like, shape (n_unique_action, dim_action_context)
Vector representation for each action.
    action: array-like, shape (n_rounds * len_list)
Sampled action.
Action list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`].
base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None
Function generating expected reward for each given action-context pair,
i.e., :math:`\\mu: \\mathcal{X} \\times \\mathcal{A} \\rightarrow \\mathbb{R}`.
If None is set, context **independent** expected reward for each action will be
sampled from the uniform distribution automatically.
reward_type: str, default='binary'
Type of reward variable, which must be either 'binary' or 'continuous'.
When 'binary' is given, expected reward is transformed by logit function.
action_interaction_weight_matrix (`W`): array-like, shape (len_list, len_list)
`W(i, j)` is the weight of how the expected reward of slot `i` affects that of slot `j`.
random_state: int, default=None
Controls the random seed in sampling dataset.
Returns
---------
expected_reward_factual: array-like, shape (n_rounds, len_list)
Expected rewards given factual actions (:math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\sum_{j \\neq k} g^{-1}(f(x, a(j))) * W(k, j)`).
"""
if not isinstance(context, np.ndarray) or context.ndim != 2:
raise ValueError("context must be 2-dimensional ndarray")
if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:
raise ValueError("action_context must be 2-dimensional ndarray")
if not isinstance(action, np.ndarray) or action.ndim != 1:
raise ValueError("action must be 1-dimensional ndarray")
if reward_type not in [
"binary",
"continuous",
]:
raise ValueError(
f"reward_type must be either 'binary' or 'continuous', but {reward_type} is given."
)
if action_interaction_weight_matrix.shape[0] * context.shape[0] != action.shape[0]:
raise ValueError(
"the size of axis 0 of action_interaction_weight_matrix multiplied by that of context must be the same as that of action"
)
# action_2d: array-like, shape (n_rounds, len_list)
action_2d = action.reshape(
(context.shape[0], action_interaction_weight_matrix.shape[0])
)
# action_3d: array-like, shape (n_rounds, n_unique_action, len_list)
action_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)
# expected_reward: array-like, shape (n_rounds, n_unique_action)
expected_reward = base_reward_function(
context=context, action_context=action_context, random_state=random_state
)
if reward_type == "binary":
expected_reward = np.log(expected_reward / (1 - expected_reward))
# expected_reward_3d: array-like, shape (n_rounds, n_unique_action, len_list)
expected_reward_3d = np.tile(
expected_reward, (action_interaction_weight_matrix.shape[0], 1, 1)
).transpose(1, 2, 0)
# action_interaction_weight: array-like, shape (n_rounds, n_unique_action, len_list)
action_interaction_weight = action_3d @ action_interaction_weight_matrix
# weighted_expected_reward: array-like, shape (n_rounds, n_unique_action, len_list)
weighted_expected_reward = action_interaction_weight * expected_reward_3d
# expected_reward_factual: list, shape (n_rounds, len_list)
expected_reward_factual = weighted_expected_reward.sum(axis=1)
if reward_type == "binary":
expected_reward_factual = sigmoid(expected_reward_factual)
# q_l = \sum_{a} a3d[i, a, l] q_a + \sum_{a_1, a_2} delta(a_1, a_2)
# return: array, shape (n_rounds, len_list)
expected_reward_factual = np.array(expected_reward_factual)
assert expected_reward_factual.shape == (
context.shape[0],
action_interaction_weight_matrix.shape[0],
), f"response shape must be (n_rounds, len_list), but {expected_reward_factual.shape}"
return expected_reward_factual | 44d4b9a9a50334f273210ae8014aebe46661c61a | 10,508 |
def deserialize_once_dates(dates):
"""
Deserializes the dates as expected within a once dates object.
:param dates: The dates object
:return: A 2-tuple containing all the deserialized date parameters
"""
return (
du_parser.parse(dates[RULE_ONCE_S_TIME]),
du_parser.parse(dates[RULE_ONCE_E_TIME])
) | 1b1544abcf692c091e04965cb754279f4ecdf0f4 | 10,509 |
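# A hedged usage sketch for deserialize_once_dates. It assumes du_parser is
# dateutil.parser and that RULE_ONCE_S_TIME / RULE_ONCE_E_TIME are the module's
# key constants for the start and end timestamps (illustrative values shown).
from dateutil import parser as du_parser  # assumed alias

dates = {
    RULE_ONCE_S_TIME: "2021-05-01T09:00:00",
    RULE_ONCE_E_TIME: "2021-05-01T10:30:00",
}
start, end = deserialize_once_dates(dates)
print(start, end)  # two datetime objects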
import copy
def gen_CDR_MitEx(
device_backend: Backend,
simulator_backend: Backend,
n_non_cliffords: int,
n_pairs: int,
total_state_circuits: int,
**kwargs
) -> MitEx:
"""
Produces a MitEx object for applying Clifford Circuit Learning & Clifford Data Regression
mitigation methods when calculating expectation values of observables. Implementation as
in arXiv:2005.10189.
:param device_backend: Backend object device experiments are default run through.
:type device_backend: Backend
:param simulator_backend: Backend object simulated characterisation experiments are
default run through.
:type simulator_backend: Backend
:param n_non_cliffords: Number of gates in Ansatz Circuit left as non-Clifford gates when
producing characterisation circuits.
:type n_non_cliffords: int
:param n_pairs: Number of non-Clifford gates sampled to become Clifford and vice versa
each time a new state circuit is generated.
:type n_pairs: int
:param total_state_circuits: Total number of state circuits produced for characterisation.
:type total_state_circuits: int
:key StatesSimulatorMitex: MitEx object noiseless characterisation simulations are executed on, default
simulator_backend with basic compilation of circuit.
:key StatesDeviceMitex: MitEx object noisy characterisation circuit are executed on, default
device_backend with basic compilation of circuit.
:key ExperimentMitex: MitEx object that actual experiment circuits are executed on, default
backend with some compilation of circuit.
:key model: Model characterised by state circuits, default _PolyCDRCorrect(1) (see cdr_post.py for other options).
:key likelihood_function: LikelihoodFunction used to filter state circuit results, given by a LikelihoodFunction Enum,
default set to none.
:key tolerance: Model can be perturbed when calibration circuits have by
exact expectation values too close to each other. This parameter
sets a distance between exact expectation values which at least some
calibration circuits should have.
:key distance_tolerance: The absolute tolerance on the distance between
expectation values of the calibration and original circuit.
:key calibration_fraction: The upper bound on the fraction of calibration
circuits which have noisy expectation values far from that of the
original circuit.
"""
_states_sim_mitex = copy.copy(
kwargs.get(
"states_simluator_mitex",
MitEx(
simulator_backend,
_label="StatesSimMitex",
mitres=gen_compiled_MitRes(simulator_backend, 0),
),
)
)
_states_device_mitex = copy.copy(
kwargs.get(
"states_device_mitex",
MitEx(
device_backend,
_label="StatesDeviceMitex",
mitres=gen_compiled_MitRes(device_backend, 0),
),
)
)
_experiment_mitex = copy.copy(
kwargs.get(
"experiment_mitex",
MitEx(
device_backend,
_label="ExperimentMitex",
mitres=gen_compiled_MitRes(device_backend, 0),
),
)
)
_states_sim_taskgraph = TaskGraph().from_TaskGraph(_states_sim_mitex)
_states_sim_taskgraph.parallel(_states_device_mitex)
_states_sim_taskgraph.append(ccl_result_batching_task_gen(total_state_circuits))
likelihood_function = kwargs.get("likelihood_function", LikelihoodFunction.none)
_experiment_taskgraph = TaskGraph().from_TaskGraph(_experiment_mitex)
_experiment_taskgraph.parallel(_states_sim_taskgraph)
_post_calibrate_task_graph = TaskGraph(_label="FitCalibrate")
_post_calibrate_task_graph.append(
ccl_likelihood_filtering_task_gen(likelihood_function)
)
_post_calibrate_task_graph.append(
cdr_calibration_task_gen(
device_backend,
kwargs.get("model", _PolyCDRCorrect(1)),
)
)
_post_task_graph = TaskGraph(_label="QualityCheckCorrect")
_post_task_graph.parallel(_post_calibrate_task_graph)
_post_task_graph.prepend(
cdr_quality_check_task_gen(
distance_tolerance=kwargs.get("distance_tolerance", 0.1),
calibration_fraction=kwargs.get("calibration_fraction", 0.5),
)
)
_experiment_taskgraph.prepend(
ccl_state_task_gen(
n_non_cliffords,
n_pairs,
total_state_circuits,
simulator_backend=simulator_backend,
tolerance=kwargs.get("tolerance", 0.01),
max_state_circuits_attempts=kwargs.get("max_state_circuits_attempts", 10),
)
)
_experiment_taskgraph.append(_post_task_graph)
_experiment_taskgraph.append(cdr_correction_task_gen(device_backend))
return MitEx(device_backend).from_TaskGraph(_experiment_taskgraph) | 04d78d36c3ef10252fd33079e0f44c7eab433bed | 10,510 |
from cvxpy.atoms.affine.trace import trace
def lambda_sum_largest_canon(expr, args):
"""
S_k(X) denotes lambda_sum_largest(X, k)
t >= k S_k(X - Z) + trace(Z), Z is PSD
implies
t >= ks + trace(Z)
Z is PSD
sI >= X - Z (PSD sense)
which implies
t >= ks + trace(Z) >= S_k(sI + Z) >= S_k(X)
We use the fact that
S_k(X) = sup_{sets of k orthonormal vectors u_i}\sum_{i}u_i^T X u_i
and if Z >= X in PSD sense then
\sum_{i}u_i^T Z u_i >= \sum_{i}u_i^T X u_i
We have equality when s = lambda_k and Z diagonal
with Z_{ii} = (lambda_i - lambda_k)_+
"""
X = expr.args[0]
k = expr.k
Z = Variable((X.shape[0], X.shape[0]), PSD=True)
obj, constr = lambda_max_canon(expr, [X - Z])
obj = k*obj + trace(Z)
return obj, constr | 5f6d6a44c67255eb27ff98d48570303768fcdc61 | 10,511 |
def compare_bib_dict(item1, item2):
""" compare bibtex item1 and item 2 in dictionary form """
# unique id check
col_list = ["doi", "pmid", "pmcid", "title", "local-url"]
for c in col_list:
if (item1.get(c, "1") != '') and (item1.get(c, "1") == item2.get(c, "2")):
return 1.0
score = 0.0
def _get_score(item1, item2, colname, s):
if item1.get(colname, "1") == '': return 0.0
if item1.get(colname, "2") == '': return 0.0
if item1.get(colname, "1") == item2.get(colname, "2"): return s
return 0.0
score = score + _get_score(item1, item2, "year", 0.2)
score = score + _get_score(item1, item2, "author", 0.2)
score = score + _get_score(item1, item2, "author1", 0.1)
score = score + _get_score(item1, item2, "journal", 0.2)
score = score + _get_score(item1, item2, "volume", 0.1)
return score | 87d974adec31c5c5fb130d0b5fd8a2b750f67eff | 10,512 |
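# A minimal usage sketch: two bibliography entries that share a DOI match
# immediately with score 1.0; otherwise the score is accumulated from the
# partial field matches above.
item_a = {"doi": "10.1000/xyz123", "year": "2020", "author": "Doe, J."}
item_b = {"doi": "10.1000/xyz123", "year": "2020", "author": "Doe, Jane"}
print(compare_bib_dict(item_a, item_b))  # 1.0 because the DOIs match
item_c = {"year": "2020", "author": "Doe, J.", "journal": "Nature"}
print(compare_bib_dict(item_a, item_c))  # 0.4 from matching year and author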
import numpy as np
def find_residues_lsfd(poles, H, fs):
"""Find residues from poles and FRF estimates
Estimate the (in band) residue matrices from poles and FRF's by
the Least Squares Frequency Domain Algorithm (LSFD).
A residue matrix is the outer product of the mode vector
and the modal participation factor. The mode vector can therefore
be recovered by SVD decomposition of the residue matrix.
Arguments
---------
poles : 1darray
        Continuous time poles (eigenvalues).
H : 3darray
FRF matrix where the first and second axis refers to the
outputs and inputs, respectively and the third axis
refers to the frequency.
fs : float
Sampling rate
Returns
-------
3darray
Residue matrices where the first dimension refers
to the poles, second dimension to outputs and
third to inputs, i.e. if `R` is the returned matrix
then `R[0]` is the residue matrix corresponding to
pole `poles[0]`.
"""
l, m, nf = H.shape
p = np.r_[poles, poles.conj()]
n = p.size
A = np.zeros((l*nf, (n+2)*l), dtype=complex)
w = 2*np.pi*np.linspace(0., fs/2, num=nf)
I = np.eye(l)
B = np.zeros((nf*l, m), dtype=complex)
for i, wi in enumerate(w):
A[i*l:(i+1)*l, -2*l:-1*l] = I / (1j*wi+1e-3)**1
A[i*l:(i+1)*l, -l:] = I * (1j*wi)
B[i*l:(i+1)*l, :] = H[:, :, i]
for j, pj in enumerate(p):
A[i*l:(i+1)*l, j*l:(j+1)*l] = I/(1j*wi-pj)
X = np.linalg.lstsq(A, B, rcond=None)[0]
return X[:l*n//2].reshape((n//2, l, m)) | f4cd257c8478f32b04f7737197451371f6e1bb12 | 10,513 |
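# A hedged, self-contained check of find_residues_lsfd on a synthetic
# single-input/single-output FRF built from one known pole and residue. The
# recovered values depend on frequency resolution and the residual terms in the
# least-squares model, so treat this as a sketch rather than a test.
import numpy as np

fs = 256.0
nf = 513                                     # frequency lines from 0 to fs/2
w = 2 * np.pi * np.linspace(0.0, fs / 2, num=nf)
pole = -1.0 + 2 * np.pi * 20.0 * 1j          # lightly damped 20 Hz mode
residue = np.array([[1.0 + 0.5j]])           # 1x1 residue matrix
H = np.zeros((1, 1, nf), dtype=complex)
for k, wk in enumerate(w):
    H[:, :, k] = residue / (1j * wk - pole) + residue.conj() / (1j * wk - pole.conjugate())
R = find_residues_lsfd(np.array([pole]), H, fs)
print(R[0])  # should be close to the residue matrix above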
def create_regularly_sampled_time_points(interval: pendulum.Duration, start_time_point: pendulum.DateTime, count: int):
"""
Create a sequence of `count` time points starting at `start_time_point`, `interval` apart.
Args:
interval: The time interval between each point.
start_time_point: The starting time point of the sequence.
count: The number of time points in the sequence.
Returns:
The sequence of time points.
"""
# I must handle a count of 0 specially because `pendulum` **includes** the endpoint of the specified range.
if count == 0:
return []
# The `pendulum` package, by default, **includes** the endpoint of the specified range. I want to exclude it when
# I create these series so my end point must be `count - 1`.
end_time_point = start_time_point + interval * (count - 1)
result = pendulum.period(start_time_point, end_time_point).range('seconds', interval.total_seconds())
return result | 22b133e10c6385b577ec9f1c32df932b6638e0f5 | 10,514 |
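# A brief usage sketch (pendulum 2.x API): ten time points, 30 seconds apart,
# starting on the hour.
import pendulum

points = create_regularly_sampled_time_points(
    interval=pendulum.duration(seconds=30),
    start_time_point=pendulum.datetime(2021, 6, 1, 12, 0, 0),
    count=10,
)
print(list(points)[:3])  # 12:00:00, 12:00:30, 12:01:00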
def create_model(bert_config, is_training, input_ids_list, input_mask_list,
segment_ids_list, use_one_hot_embeddings):
"""Creates a classification model."""
all_logits = []
input_ids_shape = modeling.get_shape_list(input_ids_list, expected_rank=2)
batch_size = input_ids_shape[0]
seq_length = input_ids_shape[1]
seq_length = seq_length // NUM_DOCS
def reshape_and_unstack_inputs(inputs, batch_size):
inputs = tf.reshape(inputs, [batch_size, NUM_DOCS, seq_length])
return tf.unstack(inputs, axis=1)
input_ids_list = reshape_and_unstack_inputs(input_ids_list, batch_size)
input_mask_list = reshape_and_unstack_inputs(input_mask_list, batch_size)
segment_ids_list = reshape_and_unstack_inputs(segment_ids_list, batch_size)
start_logits, end_logits = [], []
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope:
for i in range(len(input_ids_list)):
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids_list[i],
input_mask=input_mask_list[i],
token_type_ids=segment_ids_list[i],
use_one_hot_embeddings=use_one_hot_embeddings,
scope="bert")
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/open_qa/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/open_qa/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(s_logits, e_logits) = (unstacked_logits[0], unstacked_logits[1])
start_logits.append(s_logits)
end_logits.append(e_logits)
start_logits = tf.concat(start_logits, axis=-1)
end_logits = tf.concat(end_logits, axis=-1)
return (start_logits, end_logits) | a96b5804f43561db25ad1d24acfa2b476318a904 | 10,515 |
import numpy
def where(condition: numpy.typing.ArrayLike, *args: PolyLike) -> ndpoly:
"""
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
        documentation covers only the case where all three arguments are
provided.
Args:
condition:
Where True, yield `x`, otherwise yield `y`.
x:
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns:
An array with elements from `x` where `condition` is True,
and elements from `y` elsewhere.
Examples:
>>> poly = numpoly.variable()*numpy.arange(4)
>>> poly
polynomial([0, q0, 2*q0, 3*q0])
>>> numpoly.where([1, 0, 1, 0], 7, 2*poly)
polynomial([7, 2*q0, 7, 6*q0])
>>> numpoly.where(poly, 2*poly, 4)
polynomial([4, 2*q0, 4*q0, 6*q0])
>>> numpoly.where(poly)
(array([1, 2, 3]),)
"""
if isinstance(condition, numpoly.ndpoly):
condition = numpy.any(numpy.asarray(
condition.coefficients), 0).astype(bool)
if not args:
return numpy.where(condition)
poly1, poly2 = numpoly.align_polynomials(*args)
coefficients = [numpy.where(condition, x1, x2)
for x1, x2 in zip(poly1.coefficients, poly2.coefficients)]
dtype = numpy.result_type(poly1.dtype, poly2.dtype)
return numpoly.polynomial_from_attributes(
exponents=poly1.exponents,
coefficients=coefficients,
names=poly1.names,
dtype=dtype,
) | c577df0f9c7bf900026f868c06260d0e5f242732 | 10,517 |
async def async_attach_trigger(
hass, config, action, automation_info, *, platform_type="event"
):
"""Listen for events based on configuration."""
event_types = config.get(CONF_EVENT_TYPE)
removes = []
event_data_schema = None
if config.get(CONF_EVENT_DATA):
event_data_schema = vol.Schema(
{
vol.Required(key): value
for key, value in config.get(CONF_EVENT_DATA).items()
},
extra=vol.ALLOW_EXTRA,
)
event_context_schema = None
if config.get(CONF_EVENT_CONTEXT):
event_context_schema = vol.Schema(
{
vol.Required(key): _schema_value(value)
for key, value in config.get(CONF_EVENT_CONTEXT).items()
},
extra=vol.ALLOW_EXTRA,
)
job = HassJob(action)
@callback
def handle_event(event):
"""Listen for events and calls the action when data matches."""
try:
# Check that the event data and context match the configured
# schema if one was provided
if event_data_schema:
event_data_schema(event.data)
if event_context_schema:
event_context_schema(event.context.as_dict())
except vol.Invalid:
# If event doesn't match, skip event
return
hass.async_run_hass_job(
job,
{
"trigger": {
"platform": platform_type,
"event": event,
"description": f"event '{event.event_type}'",
}
},
event.context,
)
removes = [
hass.bus.async_listen(event_type, handle_event) for event_type in event_types
]
@callback
def remove_listen_events():
"""Remove event listeners."""
for remove in removes:
remove()
return remove_listen_events | da72cd5dfd17da52d3164c69f3a49aafedaf851a | 10,518 |
import numpy as np
def histogram_filter(x, lb=0, ub=1):
"""Truncates the tail of samples for better visualisation.
Parameters
----------
x : array-like
One-dimensional numeric arrays.
lb : float in [0, 1], optional
Defines the lower bound quantile
ub : float in [0, 1], optional
Defines the upper bound quantile
"""
return x[(np.quantile(x, q=lb) < x) & (x < np.quantile(x, q=ub))] | 35886093b23075a167443c29b30f2123dc1dcb70 | 10,519 |
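# A quick usage sketch: trim the extreme 1% tails of a heavy-tailed sample
# before plotting its histogram.
import numpy as np

x = np.random.standard_cauchy(10_000)
trimmed = histogram_filter(x, lb=0.01, ub=0.99)
print(len(x), len(trimmed))  # the trimmed array drops roughly 2% of the points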
import requests
def create_tweet(food_name):
"""Create the text of the tweet you want to send."""
r = requests.get(food2fork_url, params={"q": food_name, "key": F2F_KEY})
try:
r_json = r.json()
except Exception as e:
return "No recipe found. #sadpanda"
# fetch top-ranked recipe
recipe = r_json["recipes"][0]
recipe_f2f_url = recipe["f2f_url"]
recipe_name = recipe["title"]
recipe_publisher = recipe["publisher"]
recipe_img = recipe["image_url"]
text = "\"%s\" by %s: %s" % (recipe_name, recipe_publisher, recipe_f2f_url)
return text | 8bff622f3d15a184d05dc3bb9dd4291164e1ab46 | 10,520 |
from typing import List
def sublist(lst1: List[T1], lst2: List[T1]) -> bool:
"""
Check `lst1` is sublist of `lst2`.
Parameters
----------
lst1 : List[T1]
List 1.
lst2 : List[T1]
List 2.
Returns
-------
bool
`True` if `lst1` is sublist of `lst2`.
Examples
--------
>>> sublist([1,2,3], [1,2,3])
True
>>> sublist([1,2,3], [1,2,3,4])
True
>>> sublist([1,2,3,5], [1,2,3,4])
False
"""
return set(lst1) <= set(lst2) | de687a9fa4e9b46b6dc21d3f6c3037299b2c5d55 | 10,521 |
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
def func(grads,g):
grads.append(tf.expand_dims(g,0))
return grads
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
if g is None:
continue
# if tf.is_nan(g):
# continue
# Add 0 dimension to the gradients to represent the tower.
# expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
# grads=tf.cond(tf.reduce_any(tf.is_nan(g)), lambda:grads, lambda:func(grads,g))
grads.append(tf.expand_dims(g, 0))
if len(grads)==0:
continue
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads | 32c4faa4637943d2e82f5c91de75992237bb3986 | 10,522 |
import numpy as np
from collections import defaultdict
from typing import Tuple
from typing import DefaultDict
from tqdm import tqdm
def combine_predictions(indices1: NpArray, confs1: NpArray, indices2: NpArray,
confs2: NpArray) -> Tuple[NpArray, NpArray]:
""" Joins two predictions, returns sorted top-3 results in every row """
dprint(indices1.shape)
dprint(indices2.shape)
assert indices1.shape == indices2.shape
assert confs1.shape == confs2.shape
merged_indices = []
merged_confs = []
for idx1, conf1, idx2, conf2 in tqdm(zip(indices1, confs1, indices2, confs2),
total=indices1.shape[0]):
items: DefaultDict[int, float] = defaultdict(float)
for i, c in zip(idx1, conf1):
items[i] += c
for i, c in zip(idx2, conf2):
items[i] += c
indices = sorted(items.keys(), key=lambda i: -items[i])
confs = [items[i] for i in indices]
merged_indices.append(indices[:TOP_K])
merged_confs.append(confs[:TOP_K])
return np.array(merged_indices), np.array(merged_confs) | af76da618c894396f37ad1dc48512bb3e2658770 | 10,523 |
def get_speed_limit(center, rad, speed_limit):
"""
Retrieves the speed limit of the intersection circle
:param center: center coordinate point of the intersection circle
:param rad: radius of the intersection circle
:param speed_limit: speed limit of the intersection
:type center: Coordinates
:type rad: float
:type speed_limit: int
:return: speed limit of the intersection circle
"""
i = Intersection(center, rad, speed_limit)
return i.get_speed_limit() | cb5ccc4e3cce4f65076f70fb8f462069c3fd16f5 | 10,524 |
def db_eval(techniques,sequences,inputdir=cfg.PATH.SEGMENTATION_DIR,metrics=None):
""" Perform per-frame sequence evaluation.
Arguments:
techniques (string,list): name(s) of the method to be evaluated.
sequences (string,list): name(s) of the sequence to be evaluated.
inputdir (string): path to the technique(s) folder.
Returns:
db_eval_dict[method][measure][sequence] (dict): evaluation results.
"""
if isinstance(techniques,str): techniques = [techniques]
if isinstance(sequences,str): sequences = [sequences]
ndict = lambda: defaultdict(ndict)
db_eval_dict = ndict()
# RAW, per-frame evaluation
timer = Timer()
log.info("Number of cores allocated: %d"%cfg.N_JOBS)
for technique in techniques:
log.info('Evaluating technique: "%s"'%technique)
timer.tic()
J,j_M,j_O,j_D,F,f_M,f_O,f_D,T,t_M = \
zip(*Parallel(n_jobs=cfg.N_JOBS)(delayed(db_eval_sequence)(
technique,sequence,inputdir,metrics) for sequence in sequences))
log.info('Processing time: "%.3f"'%timer.toc())
# STORE RAW EVALUATION
for seq_id,sequence in enumerate(sequences):
db_eval_dict[technique]['J'][sequence] = J[seq_id]
db_eval_dict[technique]['F'][sequence] = F[seq_id]
db_eval_dict[technique]['T'][sequence] = T[seq_id]
return db_eval_dict | caa5b557db75ac354a1e0fc3964fcf544ff8522f | 10,527 |
def scale_constraint(source_obj, target_obj, maintain_offset=True):
"""
create scale constraint.
    :param source_obj: object(s) driving the constraint
    :param target_obj: object to be constrained
    :param maintain_offset: preserve the current scale offset when constraining
    :return: name of the created scaleConstraint node
"""
return cmds.scaleConstraint(source_obj, target_obj, mo=maintain_offset)[0] | 47398fc37c7510a8d02b2a372ecca4fd9c51b20e | 10,528 |
def freq2note(freq):
"""Convert frequency in Hz to nearest note name.
Parameters
----------
freq : float [0, 23000[
input frequency, in Hz
Returns
-------
str
name of the nearest note
Example
-------
>>> aubio.freq2note(440)
'A4'
>>> aubio.freq2note(220.1)
'A3'
"""
nearest_note = int(freqtomidi(freq) + .5)
return midi2note(nearest_note) | 6810cd863b29f23f4bcbf657b5a4fc854f605640 | 10,529 |
def read_partpositions(filename, nspec, ctable=True, clevel=5, cname="lz4", quantize=None):
"""Read the particle positions in `filename`.
This function strives to use as less memory as possible; for this, a
bcolz ctable container is used for holding the data. Besides to be compressed
in-memory, its chunked nature makes a natural fit for data that needs to
be appended because it does not need expensive memory resize operations.
    NOTE: This code reads directly from an UNFORMATTED SEQUENTIAL data Fortran
file so care has been taken to skip the record length at the beginning and
the end of every record. See:
http://stackoverflow.com/questions/8751185/fortran-unformatted-file-format
Parameters
----------
filename : string
The file name of the particle raw data
nspec : int
number of species in particle raw data
ctable : bool
Return a bcolz ctable container. If not, a numpy structured array is returned instead.
clevel : int
Compression level for the ctable container
cname : string
Codec name for the ctable container. Can be 'blosclz', 'lz4', 'zlib' or 'zstd'.
quantize : int
Quantize data to improve (lossy) compression. Data is quantized using
np.around(scale*data)/scale, where scale is 2**bits, and bits is
determined from the quantize value. For example, if quantize=1, bits
will be 4. 0 means that the quantization is disabled.
Returns
-------
ctable object OR structured_numpy_array
Returning a ctable is preferred because it is used internally so it does not require to be
converted to other formats, so it is faster and uses less memory.
Note: Passing a `quantize` param > 0 can increase the compression ratio of the ctable
container, but it may also slow down the reading speed significantly.
License
This function is taken from the reflexible package (https://github.com/spectraphilic/reflexible/tree/master/reflexible).
Authored by John F Burkhart <[email protected]> with contributions Francesc Alted <[email protected]>.
Licensed under: 'This script follows creative commons usage.'
"""
CHUNKSIZE = 10 * 1000
xmass_dtype = [('xmass_%d' % (i + 1), 'f4') for i in range(nspec)]
# note age is calculated from itramem by adding itimein
out_fields = [
('npoint', 'i4'), ('xtra1', 'f4'), ('ytra1', 'f4'), ('ztra1', 'f4'),
('itramem', 'i4'), ('topo', 'f4'), ('pvi', 'f4'), ('qvi', 'f4'),
('rhoi', 'f4'), ('hmixi', 'f4'), ('tri', 'f4'), ('tti', 'f4')] + xmass_dtype
raw_fields = [('begin_recsize', 'i4')] + out_fields + [('end_recsize', 'i4')]
raw_rectype = np.dtype(raw_fields)
recsize = raw_rectype.itemsize
cparams = bcolz.cparams(clevel=clevel, cname=cname)
if quantize is not None and quantize > 0:
out = get_quantized_ctable(raw_rectype, cparams=cparams, quantize=quantize, expectedlen=int(1e6))
else:
out = bcolz.zeros(0, dtype=raw_rectype, cparams=cparams, expectedlen=int(1e6))
with open(filename, "rb", buffering=1) as f:
# The timein value is at the beginning of the file
reclen = np.ndarray(shape=(1,), buffer=f.read(4), dtype="i4")[0]
assert reclen == 4
itimein = np.ndarray(shape=(1,), buffer=f.read(4), dtype="i4")
reclen = np.ndarray(shape=(1,), buffer=f.read(4), dtype="i4")[0]
assert reclen == 4
nrec = 0
while True:
# Try to read a complete chunk
data = f.read(CHUNKSIZE * recsize)
read_records = int(len(data) / recsize) # the actual number of records read
chunk = np.ndarray(shape=(read_records,), buffer=data, dtype=raw_rectype)
# Add the chunk to the out array
out.append(chunk[:read_records])
nrec += read_records
if read_records < CHUNKSIZE:
# We reached the end of the file
break
# Truncate at the max length (last row is always a sentinel, so remove it)
out.trim(1)
# Remove the first and last columns
out.delcol("begin_recsize")
out.delcol("end_recsize")
if ctable:
return out
else:
return out[:] | 7fb37c29ef6962bf77bb2d781a15d86835cbe1c6 | 10,530 |
def configure_l3(conf, tunnel_mode):
"""
This function creates a temporary test bridge and adds an L3 tunnel.
"""
s = util.start_local_server(conf[1][1])
server = util.rpc_client("127.0.0.1", conf[1][1])
server.create_bridge(DEFAULT_TEST_BRIDGE)
server.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_PORT)
server.interface_up(DEFAULT_TEST_BRIDGE)
server.interface_assign_ip(DEFAULT_TEST_BRIDGE, conf[1][0],
None)
server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type",
None, tunnel_mode)
server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "options",
"remote_ip", conf[0])
return s | 72c8a1e782e4b147a7fd67bda5b3433438bc089a | 10,533 |
import numpy as np
def gradient_descent(y, tx, initial_w, gamma, max_iters):
"""Gradient descent algorithm."""
threshold = 1e-3 # determines convergence. To be tuned
# Define parameters to store w and loss
ws = [initial_w]
losses = []
w = initial_w
method = 'mse'
for n_iter in range(max_iters):
current_grad = gradient_least_square(y, tx, w)
current_loss = compute_loss(y, tx, w, method)
# Moving in the direction of negative gradient
w = w - gamma * current_grad
# Store w and loss
ws.append(w)
losses.append(current_loss)
# Convergence criteria
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
break
print("Gradient Descent({bi}): loss={l}".format(
bi=n_iter, l=current_loss))
return losses, ws | 147ee85a51647fed91c4c890e89e493c59ec1d14 | 10,534 |
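# A hedged usage sketch: it assumes the helper functions gradient_least_square
# and compute_loss (referenced above but not shown) are available in the same
# module, and fits a simple least-squares line to synthetic data.
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, size=200)
tx = np.c_[np.ones_like(x), x]                 # design matrix with a bias column
y = 3.0 + 2.0 * x + rng.normal(0, 0.1, 200)    # data generated with weights [3, 2]
losses, ws = gradient_descent(y, tx, initial_w=np.zeros(2), gamma=0.1, max_iters=200)
print(ws[-1])  # should approach [3, 2]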
import torch
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
    # Initialise a B x npoint matrix that will hold the indices of the sampled
    # points, where B is the batch size
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # distance (B x N) stores, for every point in a batch, its distance to the
    # closest already-selected centroid; initialised large and updated each iteration
    distance = torch.ones(B, N).to(device) * 1e10
    # farthest holds the index of the current farthest point, initialised
    # randomly in [0, N) with one value per batch
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    # batch_indices is simply 0 .. B-1
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    # Iterate until npoint samples have been selected
    for i in range(npoint):
        # Use the current farthest point as the next sampled centroid
        centroids[:, i] = farthest
        # Gather the coordinates of that centroid
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
        # Squared Euclidean distance from every point to this centroid
        dist = torch.sum((xyz - centroid) ** 2, -1)
        # Where the new distance is smaller than the stored one, update it;
        # distance therefore tracks the minimum distance to any selected centroid
        mask = dist < distance
        distance[mask] = dist[mask].float()
        # The next farthest point is the one with the largest minimum distance
        farthest = torch.max(distance, -1)[1]
return centroids | 55663ede9a4306f2f9f89bbc4476bd61c06c2138 | 10,535 |
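# A quick usage sketch: subsample 128 points from each of two random point
# clouds of 1024 points.
import torch

xyz = torch.rand(2, 1024, 3)                       # [B, N, 3] point clouds
idx = farthest_point_sample(xyz, npoint=128)       # [B, 128] indices
sampled = xyz[torch.arange(2).unsqueeze(1), idx]   # gather sampled points, [B, 128, 3]
print(sampled.shape)  # torch.Size([2, 128, 3])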
def loadData(fname='Unstra.out2.00008.athdf'):
"""load 3d bfield and calc the current density"""
#data=ath.athdf(fname,quantities=['B1','B2','B3'])
time,data=ath.athdf(fname,quantities=['Bcc1'])
bx = data['Bcc1']
time,data=ath.athdf(fname,quantities=['Bcc2'])
by = data['Bcc2']
time,data=ath.athdf(fname,quantities=['Bcc3'])
bz = data['Bcc3']
x = data['x1f']
y = data['x2f']
z = data['x3f']
# ---
def curl(vx,vy,vz,dx,dy,dz):
[dzvx,dyvx,dxvx] = np.gradient(vx)
[dzvy,dyvy,dxvy] = np.gradient(vy)
[dzvz,dyvz,dxvz] = np.gradient(vz)
cx = dyvz/dy-dzvy/dz
cy = dzvx/dz-dxvz/dx
cz = dxvy/dx-dyvx/dy
# No need to del the reference by one manually
# allow python to perform its own garbage collection
# after the function return cxyz
#del dzvx
#del dzvy
#del dzvz
return cx,cy,cz
# ---
dx = dz = x[1]-x[0]
dy = y[1]-y[0]
jx,jy,jz = curl(bx,by,bz,dx,dy,dz)
j2 = jx**2+jy**2+jz**2
return bx,by,bz,j2 | 202b61dc0c97fbcd4d0df86cede821e8cdeb14dc | 10,536 |
def sparse_chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
"""Given the true sparsely encoded tag sequence y, input x (with mask),
transition energies U, boundary energies b_start and b_end, it computes
the loss function of a Linear Chain Conditional Random Field:
loss(y, x) = NLL(P(y|x)), where P(y|x) = exp(E(y, x)) / Z.
So, loss(y, x) = - E(y, x) + log(Z)
Here, E(y, x) is the tag path energy, and Z is the normalization constant.
The values log(Z) is also called free energy.
"""
x = add_boundary_energy(x, b_start, b_end, mask)
energy = path_energy0(y, x, U, mask)
energy -= free_energy0(x, U, mask)
return K.expand_dims(-energy, -1) | 9bdbee3c9a634cabf63e08957beee83e22565043 | 10,537 |
from sqlalchemy import select
async def query(database: Database, payload: PostionQueryIn):
""" Find whether a point is within a country """
query = select([countries.c.name, countries.c.iso2, countries.c.iso3])
# Convert a GeoPoint into a format that can be used in postgis queries
point = f"POINT({payload.location.longitude} {payload.location.latitude})"
query = query.where(
ST_Covers(countries.c.geog, ST_GeographyFromText(f"SRID=4326;{point}"))
)
results = await database.fetch_one(query=query)
return results | 8459831e453ae6e33a596f18792cb8cc4e2d896f | 10,539 |
from . import model
import pysynphot as S
def calc_header_zeropoint(im, ext=0):
"""
Determine AB zeropoint from image header
Parameters
----------
    im : `~astropy.io.fits.HDUList` or `~astropy.io.fits.Header`
        Image object or header.
Returns
-------
ZP : float
AB zeropoint
"""
scale_exptime = 1.
if isinstance(im, pyfits.Header):
header = im
else:
if '_dr' in im.filename():
ext = 0
elif '_fl' in im.filename():
if 'DETECTOR' in im[0].header:
if im[0].header['DETECTOR'] == 'IR':
ext = 0
bunit = im[1].header['BUNIT']
else:
# ACS / UVIS
if ext == 0:
ext = 1
bunit = im[1].header['BUNIT']
if bunit == 'ELECTRONS':
scale_exptime = im[0].header['EXPTIME']
header = im[ext].header
try:
fi = get_hst_filter(im[0].header).upper()
except:
fi = None
# Get AB zeropoint
if 'PHOTFLAM' in header:
ZP = (-2.5*np.log10(header['PHOTFLAM']) - 21.10 -
5*np.log10(header['PHOTPLAM']) + 18.6921)
ZP += 2.5*np.log10(scale_exptime)
elif 'PHOTFNU' in header:
ZP = -2.5*np.log10(header['PHOTFNU'])+8.90
ZP += 2.5*np.log10(scale_exptime)
elif (fi is not None):
if fi in model.photflam_list:
ZP = (-2.5*np.log10(model.photflam_list[fi]) - 21.10 -
5*np.log10(model.photplam_list[fi]) + 18.6921)
else:
print('Couldn\'t find PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25')
ZP = 25
else:
print('Couldn\'t find FILTER, PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25')
ZP = 25
# If zeropoint infinite (e.g., PHOTFLAM = 0), then calculate from synphot
if not np.isfinite(ZP):
try:
bp = S.ObsBandpass(im[0].header['PHOTMODE'].replace(' ', ','))
spec = S.FlatSpectrum(0, fluxunits='ABMag')
obs = S.Observation(spec, bp)
ZP = 2.5*np.log10(obs.countrate())
except:
pass
return ZP | 9f196df1f7160f4dac77cc3ac5ff792225fb0fcb | 10,540 |
def _convert_min_sec_to_sec(val):
"""
    :param val: val is a string in format 'XmYsZ' like '0m5s3', meaning at second 5.3
:return:
>>> _convert_min_sec_to_sec('10m11s2')
611.2
"""
_min = val.split('m')[0]
_sec = val.split('m')[1].split('s')[0]
_dsec = val.split('s')[1]
if len(_dsec) == 1:
_dsec = _dsec + '0'
res = int(_min) * 60 + int(_sec) + float(_dsec)/100.
return res | f402e6221fa97ec5ccdb9b194478b916e85fdf85 | 10,541 |
def sitetester_home():
"""
Home screen for Tester:
A Tester can:
a. Change their testing site
        b. View appointments for the site they work at
c. Create an appointment for their testing site
d. View aggregate test results
e. View daily test results
"""
error = None
username = session['user_id']
_is_tester, _ = is_tester(username)
if not _is_tester:
error = 'You do not have access to this page.'
return render_template('login.html', error=error)
if request.method == 'POST':
_instr = request.form['submit_button']
if _instr == 'Aggregate':
return redirect(url_for("aggregrate_test_results"))
elif _instr == 'Daily':
return redirect(url_for("daily"))
elif _instr == 'Change Sites':
return redirect(url_for("tester_changesite", id = username))
elif _instr == 'View Appointments':
return redirect(url_for("view_appointments"))
elif _instr == 'Create Appointment':
return redirect(url_for('create_appointment'))
else:
error = "Invalid selection"
return render_template("sitetester_home.html", error=error)
else:
return render_template("sitetester_home.html", error=error) | 3879cbe8d2f4b9e51d9cb94362ba4eb09e1ae2cf | 10,542 |
from datetime import date, datetime
def datetime_to_fractional_year(input: datetime) -> float:
"""Converts a Python datetime object to a fractional year."""
start = date(input.year, 1, 1).toordinal() # type: ignore
year_length = date(input.year + 1, 1, 1).toordinal() - start # type: ignore
return input.year + (input.toordinal() - start) / year_length | 576361cad890f709d6d02c56f53c43529211fb2b | 10,543 |
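# A quick usage sketch: 2 July 2020 is exactly halfway through the 366-day
# leap year, so the fractional year is 2020.5.
from datetime import datetime

print(datetime_to_fractional_year(datetime(2020, 7, 2)))  # 2020.5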
from typing import Optional
def _optical_flow_to_rgb(
flow: tf.Tensor,
saturate_magnitude: float = -1.0,
name: Optional[str] = None,
) -> tf.Tensor:
"""Visualize an optical flow field in RGB colorspace."""
name = name or 'OpticalFlowToRGB'
hsv = _optical_flow_to_hsv(flow, saturate_magnitude, name)
return tf.image.hsv_to_rgb(hsv) | 3e314411ae6c3efbcb619ead61d99df614959720 | 10,544 |
def shuffle_dict(dict_1, dict_2, num_shuffles=10):
"""
Shuffles num_shuffles times
for two dictionaries that you want to compare against each other, shuffles them.
returns two di
"""
shuffled_dict_1 = {}
shuffled_dict_2 = []
for x in range(num_shuffles):
for dataset_name, dataset_element in dict_1.items():
if dataset_name not in shuffled_dict_1:
shuffled_dict_1[dataset_name] = []
shuffled_dict_1[dataset_name].append(shuffleBedTool(dataset_name + str(x), dataset_element))
for dataset_name, dataset_element in dict_2.items():
if dataset_name not in shuffled_dict_2:
shuffled_dict_2[dataset_name] = []
shuffled_dict_2[dataset_name].append(shuffleBedTool(dataset_name + str(x), dataset_element))
return shuffled_dict_1, shuffled_dict_2 | 6289f76ece3ecfb163ee3c9c1fe88d93fb259716 | 10,545 |
def clear_dd2_selection(val, n_clicks):
"""Clear Dropdown selections for Dropdown #2 (dd2)
( Dropdown to clear #2 of 2 )
Args:
val (str): cascading response via `clear_dd2_selection()` callback
n_clicks: int
Returns:
str: Resets selections to default, blank states.
"""
if n_clicks > 0:
app.logger.info(
f"-:!:- FUNCTION('clear_dd2_selection') has been activated, and now has value 'n_clicks' = {n_clicks} & 'val' = {val}"
)
if val == "None":
return "None"
else:
return None | 66533ed8a534cf0f95d2c3d4cc5827a96b1b0aeb | 10,546 |
def get_SHF_L_min_C():
""":return: 冷房負荷最小顕熱比率 (-)"""
return 0.4 | 274728ea22800ade77bfe4e41bc41a05b97ac483 | 10,547 |
from typing import Union
import pathlib
def get_path(obj: Union[str, pathlib.Path]) -> pathlib.Path:
"""Convert a str into a fully resolved & expanded Path object.
Args:
obj: obj to convert into expanded and resolved absolute Path obj
"""
return pathlib.Path(obj).expanduser().resolve() | 88641ea4a6ae54aea12b7d0c9afca8d6f475b8d0 | 10,549 |
import re
def verify_policy_type_id(policy_type_id):
"""
:type policy_type_id: str
:param policy_type_id: policy type id - e.g. storage-policy-00000001
:rtype: int
:return: Fixed policy type ID
:raises: ValueError: policy type id
"""
if not re.match("storage-policy-\d+", policy_type_id):
raise ValueError('{0} is not a valid policy type ID.'.format(policy_type_id))
return int(policy_type_id.split("-")[2]) | ff1bf183add0f2ce1dba78345a7b9fdbc2048e6c | 10,550 |
def f_fg_iou(results):
"""Calculates foreground IOU score.
Args:
        results: dict with keys 'y_out' and 'y_gt', each a list of [T, H, W] or [H, W] binary masks
Returns:
fg_iou: [B]
"""
y_out = results['y_out']
y_gt = results['y_gt']
num_ex = len(y_gt)
fg_iou = np.zeros([num_ex])
if len(y_gt[0].shape) == 3:
for ii in range(num_ex):
fg_iou[ii] = f_iou(y_out[ii].max(axis=0), y_gt[ii].max(axis=0))
else:
for ii in range(num_ex):
fg_iou[ii] = f_iou(y_out[ii], y_gt[ii])
return fg_iou | 7580f1d9317437c7f3568106c4aa1f31bff99ed4 | 10,551 |
def render_template(template, defaults):
"""Render script template to string"""
if not isinstance(template, Template):
filename = template.format(**defaults)
template = Template(filename=filename)
return template.format(**defaults) | e44554132663d8e9d4287258a27559a3ab757912 | 10,552 |
def get_hash(string):
"""
FNV1a hash algo. Generates a (signed) 64-bit FNV1a hash.
See http://www.isthe.com/chongo/tech/comp/fnv/index.html for math-y details.
"""
encoded_trimmed_string = string.strip().encode('utf8')
assert isinstance(encoded_trimmed_string, bytes)
i64 = FNV1_64A_INIT
for byte in encoded_trimmed_string:
i64 = i64 ^ byte
i64 = (i64 * FNV_64_PRIME) % (2 ** 64)
# wrap the result into the full signed BIGINT range of the underlying RDBMS
if i64 > BIGGEST_64_INT:
i64 = SMALLEST_64_INT + (i64 - BIGGEST_64_INT - 1) # optimized CPU ops
return i64 | c2315b8cc6b133b158f99ddbfd6afcd50562d7c0 | 10,553 |
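# A hedged usage sketch. The module-level constants referenced above are assumed
# to be the standard 64-bit FNV-1a parameters and the signed 64-bit integer
# bounds, written out explicitly here.
FNV1_64A_INIT = 0xcbf29ce484222325
FNV_64_PRIME = 0x100000001b3
BIGGEST_64_INT = 2 ** 63 - 1
SMALLEST_64_INT = -2 ** 63

h = get_hash("  hello world  ")       # leading/trailing whitespace is stripped first
print(h == get_hash("hello world"))   # True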
def load_seq_sizes(def_vars):
"""Load sequence sizes."""
seq1_sizes, seq2_sizes = {}, {}
if def_vars.get("SEQ1_LEN"):
seq1_sizes = read_chrom_sizes(def_vars.get("SEQ1_LEN"))
elif def_vars.get("SEQ1_CTGLEN"):
seq1_sizes = read_chrom_sizes(def_vars.get("SEQ1_CTGLEN"))
else:
clean_die("", "Cannot find SEQ1_LEN|SEQ1_CTGLEN in the DEF file")
if def_vars.get("SEQ2_LEN"):
seq2_sizes = read_chrom_sizes(def_vars.get("SEQ2_LEN"))
elif def_vars.get("SEQ1_CTGLEN"):
seq2_sizes = read_chrom_sizes(def_vars.get("SEQ2_CTGLEN"))
else:
clean_die("", "Cannot find SEQ2_LEN|SEQ2_CTGLEN in the DEF file")
return seq1_sizes, seq2_sizes | 3ba9ea6057bed3f787adfaf05cbd2a374d7a4357 | 10,554 |
from pathlib import Path
from typing import Optional
def extract_source(source_path: Path) -> (Path, Optional[str]):
"""Extract the source archive, return the extracted path and optionally the commit hash stored inside."""
extracted_path = source_path.with_name(source_path.stem)
commit_hash = None
# Determine the source archive type before extracting it
# Inspired by: https://stackoverflow.com/a/13044946/7597273
magic_dict = {
b'\x1f\x8b\x08': 'gz',
b'\x42\x5a\x68': 'bz2',
b'\x50\x4b\x03\x04': 'zip',
}
max_len = max(len(x) for x in magic_dict)
with source_path.open('rb') as f:
file_start: bytes = f.read(max_len)
for magic, archive_type in magic_dict.items():
if file_start.startswith(magic):
break
else:
raise TypeError(f'Unknown source archive type: `{source_path.name}`')
if archive_type in ('gz', 'bz2'):
with TarFile.open(str(source_path), 'r:' + archive_type) as tar:
# Commit hash (if downloaded from GitHub)
commit_hash = tar.pax_headers.get('comment')
# Update extracted path because:
# `<commit-hash>[.tar.gz]` extracts a folder named `repo-name-<commit-hash>`
# `<branch-name>[.tar.gz]` extracts a folder named `repo-name-<branch-name>`
root_files = [name for name in tar.getnames() if '/' not in name]
if len(root_files) == 1:
extracted_path = source_path.with_name(root_files[0])
tar.extractall(str(extracted_path.parent))
elif archive_type == 'zip':
with ZipFile(str(source_path), 'r') as zipf:
# Commit hash (if downloaded from GitHub)
if zipf.comment:
commit_hash = zipf.comment.decode('utf-8')
# Update extracted path because:
# `<commit-hash>[.zip]` extracts a folder named `repo-name-<commit-hash>`
# `<branch-name>[.zip]` extracts a folder named `repo-name-<branch-name>`
root_folders = []
root_files = []
for name in zipf.namelist():
if name.count('/') == 1 and name.endswith('/'):
root_folders.append(name.rstrip('/'))
if name.count('/') == 0:
root_files.append(name)
# If only one root folder
if len(root_folders) == 1 and len(root_files) == 0:
extracted_path = source_path.with_name(root_folders[0])
zipf.extractall(str(extracted_path.parent))
return extracted_path, commit_hash | 4659ac1bf79662ffe0d444c3d0053528fbb28a48 | 10,555 |
def correct_distribution (lines):
"""
Balance the distribution of angles
    Define an ideal value of samples per bin. If the count per bin is greater
    than the average, then randomly remove the items only for that bin
"""
angles = np.float32(np.array(lines)[:, 3])
num_bins = 21
hist, bins = plot_histogram( num_bins, angles, 'Histogram - before distribution correction')
#correct the distribution
ideal_samples = len(angles)/num_bins * 1.5
keep_prob = [1 if hist[i] < ideal_samples else ideal_samples/hist[i] for i in range(num_bins) ]
remove_list = []
for x, y in ((i,j) for i in range(len(angles)) for j in range(num_bins)):
if angles[x] > bins[y] and angles[x] <= bins[y+1]:
if np.random.rand() > keep_prob[y]:
remove_list.append(x)
lines = np.delete(lines, remove_list, axis=0)
# check if distribution is ok
angles = np.float32(np.array(lines)[:, 3])
hist = plot_histogram(num_bins , angles, 'Histogram - after distribution correction')
return lines | bb69e63fedf353ca5a5b64d47971bc037575905e | 10,556 |
def numDockedWindows():
""" Determine the amount of docked windows (i.e. visible on all desktops).
return - Number of windows.
"""
stdout = runCommand(COMMAND_LIST_WINDOWS)
result = -2 # first two windows are actually not windows and don't count
for i in iter(stdout.splitlines()):
if i[POS_COMMAND_LIST_WINDOWS_DOCKED] == CHAR_COMMAND_LIST_WINDOWS_DOCKED:
result += 1
return result | 41d98c9c18c5ffc675ca241411a3f470f125ec90 | 10,557 |
def get_value(hive, key_name, value_name):
"""
>>> get_value(
... HKEY_LOCAL_MACHINE,
... "SOFTWARE/Microsoft/Windows/CurrentVersion/Explorer/StartMenu",
... "Type")
[1, 'group']
>>> get_value(
... HKEY_CURRENT_USER,
... "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\CLSID\\{645FF040-5081-101B-9F08-00AA002F954E}\\DefaultIcon",
... "Full")
[2, '%SystemRoot%\\\\System32\\\\shell32.dll,32', 'C:\\\\WINDOWS\\\\System32\\\\shell32.dll,32']
"""
assert hive in (HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, HKEY_USERS, HKEY_CLASSES_ROOT)
result = None
key_name = key_name.replace("/", "\\")
with reg_connect_registry(None, hive) as reghandle:
with reg_open_key(reghandle, key_name) as keyhandle:
try:
rval, rtype = win32api.RegQueryValueEx(keyhandle, value_name)
result = [rtype, rval]
if rtype == win32con.REG_EXPAND_SZ:
result.append(_expand_path_variables(rval))
            except Exception as e:
                print(e)
return None
return result | 61abd243eab56eda43ac5f33bbe19ea1c1a78475 | 10,559 |
def addScore(appId: str, scoreName: str, value: int, userId: str,
checksum: str=Header(None), db=Depends(Database)):
""" Add user score to leaderboard
"""
validateParameters(appId=appId, scoreName=scoreName, value=value, userId=userId,
checksum=checksum)
with db.transaction() as store:
leaderboard = Leaderboards(appId=appId, userId=userId, scoreName=scoreName,
value=value)
store.merge(leaderboard)
store.commit()
userRankChecksum = computeChecksum(appId=appId, userId=userId,
scoreName=scoreName)
return getUserRank(appId, scoreName, userId, userRankChecksum, db) | de957586bfa3508ac52ed438922d401bfe1ddc74 | 10,560 |
def has_affect(builtin: str) -> bool:
"""Return `True` if the given builtin can affect accessed attributes."""
if builtin not in PYTHON_BUILTINS:
raise ValueError(f"'{builtin}' is not a Python builtin")
return builtin in PYTHON_ATTR_BUILTINS | 7bb2897659d8e4b68c1b7f0cb7b1870bcca616cf | 10,561 |
def get_plot_for_different_k_values(similarity, model_name):
"""
This function plots points after applying a cluster method for k=3,4,5,6. Furthermore prints silhouette score for each k
:param similarity: Contains our dataset (The similarity of RIPE monitors)
:return: A list containing silhouette score
"""
silhouette_scores = []
f = plt.figure()
f.add_subplot(2, 2, 1)
for i in range(3, 7):
if model_name == 'Spectral':
sc = SpectralClustering(n_clusters=i, affinity='precomputed').fit(similarity)
else:
sc = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(similarity)
silhouette_scores.append(silhouette_score(similarity, sc.labels_))
f.add_subplot(2, 2, i - 2)
plt.scatter(similarity[:, 0], similarity[:, 1], s=5, c=sc.labels_, label="n_cluster-" + str(i))
plt.legend()
plt.show()
return silhouette_scores | e4437ccac9ef1cac8d498df3467e3c8a02e449ce | 10,562 |
def RandomBrightness(image, delta, seed=None):
"""Adjust the brightness of RGB or Grayscale images.
Tips:
delta extreme value in the interval [-1, 1], >1 to white, <-1 to black.
a suitable interval is [-0.5, 0.5].
0 means pixel value no change.
Args:
image: Tensor or array. An image.
delta: if int, float, Amount to add to the pixel values.
if list, tuple, randomly picked in the interval
`[delta[0], delta[1])` to add to the pixel values.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed` for behavior.
Returns:
A brightness-adjusted tensor of the same shape and type as `image`.
Raises:
ValueError: if `delta` type is error.
"""
if isinstance(delta, (int, float)):
assert -1<=delta<=1, 'delta should be in the interval [-1, 1].'
image = tf.image.adjust_brightness(image, delta)
elif isinstance(delta, (list, tuple)):
assert -1<=delta[0]<delta[1]<=1, 'delta should be 1 >= delta[1] > delta[0] >= -1.'
random_delta = tf.random.uniform([], delta[0], delta[1], seed=seed)
image = tf.image.adjust_brightness(image, random_delta)
else:
raise ValueError('delta should be one of int, float, list, tuple.')
return image | efdcb6ad5b3339d3ba8f85264f50020645d17c87 | 10,563 |
def fetch_rfc(number):
"""
RFC fetcher
>>> fetch_rfc("1234")
(u'https://tools.ietf.org/html/rfc1234', 'Tunneling IPX traffic through IP networks')
"""
url = "https://tools.ietf.org/html/rfc%s" % (number, )
xml, dummy_response = fetch_and_parse_xml(url)
title = xml.xpath('string(//meta[@name="DC.Title"]/@content)')
return url, (title or None) | 81d91e7a3255077a9b0fb66807fadf35c165d2a0 | 10,564 |
def samefile(path1, path2, user=None):
"""
Return :obj:`True` if both path arguments refer to the same path.
"""
def tr(p):
return abspath(normpath(realpath(p)), user=user)
return tr(path1) == tr(path2) | 1d898eff82502c5aece2f60ffc562529e4dd3062 | 10,565 |
def get_list_coord(G, o, d):
"""Get the list of intermediate coordinates between
nodes o and d (inclusive).
Arguments:
G {networkx} -- Graph
o {int} -- origin id
d {int} -- destination id
Returns:
list -- E.g.: [(x1, y1), (x2, y2)]
"""
edge_data = G.get_edge_data(o, d)[0]
try:
return ox.LineString(edge_data['geometry']).coords
except:
return [(G.node[o]['x'], G.node[o]['y']), (G.node[d]['x'], G.node[d]['y'])] | 901d64d81493408ed8a17b99701088daea7fd40f | 10,566 |
from datetime import datetime
def todays_date():
"""
Returns today's date in YYYYMMDD format.
"""
    now = datetime.now()
date_str = "{0}{1}{2}".format(now.year, now.strftime('%m').zfill(2), now.strftime('%d').zfill(2))
return date_str | fb94f5af79640363ed58bbe0191def5d465cd37a | 10,567 |
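The same date string can be produced with a single strftime call; a minimal equivalent sketch (not part of the original source), assuming the import above:

from datetime import datetime

def todays_date_strftime():
    # %Y%m%d already zero-pads the month and day, e.g. "20240105"
    return datetime.now().strftime("%Y%m%d")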
def kl_divergence(p_probs, q_probs):
""""KL (p || q)"""
kl_div = p_probs * np.log(p_probs / q_probs)
return np.sum(kl_div[np.isfinite(kl_div)]) | 153a5cae3a20f79d92ce7ececf085ee344baf6a9 | 10,568 |
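A small worked check of the formula above (added for illustration), assuming numpy is imported as np and kl_divergence is defined as shown:

import numpy as np

p = np.array([0.4, 0.6])
q = np.array([0.5, 0.5])
# KL(p || q) = 0.4*ln(0.4/0.5) + 0.6*ln(0.6/0.5) ~= 0.0201
print(kl_divergence(p, q))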
def colors_to_main(colors, main_colors):
""" Mapping image colors to main colors and count pixels
    :param colors: all colors in the image
    :param main_colors: input main colors
        (blue, green, yellow, purple, pink, red, orange, brown, silver, white, gray, black)
    :return: colors mapped to the main colors, sorted by pixel count
"""
colors.sort(reverse=True)
main_color_init(main_colors)
for c1 in colors:
color_flag = lab_to_color(c1.lab)
smallest_diff = 1000
smallest_index = None
for n, c2 in enumerate(main_colors):
if color_flag is not None:
if c2.name == color_flag:
smallest_index = n
break
else:
if c2.name in ['white', 'silver', 'gray', 'black']:
continue
color_diff = diff_cie76(c1.lab, c2.lab)
if color_diff < smallest_diff:
smallest_diff = color_diff
smallest_index = n
main_colors[smallest_index].count += c1.count
colors = [color for color in main_colors]
colors.sort(reverse=True)
return colors | 0ccc41f199cb2aaad78fb7583d8aeb2bb1646e12 | 10,569 |
def get_arg_value_wrapper(
decorator_func: t.Callable[[ArgValGetter], Decorator],
name_or_pos: Argument,
func: t.Callable[[t.Any], t.Any] = None,
) -> Decorator:
"""
Call `decorator_func` with the value of the arg at the given name/position.
`decorator_func` must accept a callable as a parameter to which it will pass a mapping of
parameter names to argument values of the function it's decorating.
`func` is an optional callable which will return a new value given the argument's value.
Return the decorator returned by `decorator_func`.
"""
def wrapper(args: BoundArgs) -> t.Any:
value = get_arg_value(name_or_pos, args)
if func:
value = func(value)
return value
return decorator_func(wrapper) | 3ffb17927fd784571a7d0f22bcc46e7191711c91 | 10,570 |
def yaml_request(request: quart.local.LocalProxy) -> bool:
"""Given a request, return True if it contains a YAML request body"""
return request.content_type in (
"text/vnd.yaml",
"text/yaml",
"text/x-yaml",
"application/vnd.yaml",
"application/x-yaml",
"application/yaml",
) | 9aa31eff17d799058e272193266af46b87b618ae | 10,571 |
def validatePath(path):
"""
Returns the validated path.
:param path: string or unicode - Path to format
.. note:: Only useful if you are coding for both Linux and Windows for fixing slash problems.
e.g. Corrects 'Z://something' -> 'Z:'
Example::
fpath = xbmc.validatePath(somepath)
"""
return unicode() | cb809f3a78de96d220302700d5d2a68e402fed4c | 10,572 |
import requests
def get_raster_availability(layer, bbox=None):
"""retrieve metadata for raster tiles that cover the given bounding box
for the specified data layer.
Parameters
----------
layer : str
dataset layer name. (see get_available_layers for list)
bbox : (sequence of float|str)
bounding box of in geographic coordinates of area to download tiles
in the format (min longitude, min latitude, max longitude, max latitude)
Returns
-------
metadata : geojson FeatureCollection
returns metadata including download urls as a FeatureCollection
"""
base_url = 'https://www.sciencebase.gov/catalog/items'
params = [
('parentId', layer_dict[layer]),
('filter', 'tags=IMG'),
('max', 1000),
('fields', 'webLinks,spatial,title'),
('format', 'json'),
]
if bbox:
xmin, ymin, xmax, ymax = [float(n) for n in bbox]
polygon = 'POLYGON (({}))'.format(','.join([(repr(x) + ' ' + repr(y)) for x,y in [
(xmin, ymax),
(xmin, ymin),
(xmax, ymin),
(xmax, ymax),
(xmin, ymax)]]))
params.append(('filter', 'spatialQuery={{wkt:"{}",relation:"{}"}}'.format(polygon, 'intersects')))
features = []
url = base_url
while url:
r = requests.get(url, params)
print('retrieving raster availability from %s' % r.url)
params = [] # not needed after first request
content = r.json()
for item in content['items']:
feature = Feature(geometry=Polygon(_bbox2poly(item['spatial']['boundingBox'])), id=item['id'],
properties={
'name': item['title'],
'layer': layer,
'format': '.img',
'download url': [x for x in item['webLinks'] if x['type']=='download'][0]['uri']}
)
features.append(feature)
if content.get('nextlink'):
url = content['nextlink']['url']
else:
break
return FeatureCollection(features) | bcd0c78dcc807830dc3b9e7883ecfe51e8c5fb14 | 10,573 |
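The spatial filter above is a WKT polygon built from the bounding box; a standalone sketch of just that string construction, using a hypothetical bounding box:

xmin, ymin, xmax, ymax = -105.0, 39.0, -104.0, 40.0  # hypothetical bbox
ring = [(xmin, ymax), (xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
polygon = 'POLYGON (({}))'.format(','.join(repr(x) + ' ' + repr(y) for x, y in ring))
print(polygon)
# POLYGON ((-105.0 40.0,-105.0 39.0,-104.0 39.0,-104.0 40.0,-105.0 40.0))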
def absolute_error(observed, modeled):
"""Calculate the absolute error between two arrays.
:param observed: Array of observed data
:type observed: numpy.ndarray
:param modeled: Array of modeled data
:type modeled: numpy.ndarray
:rtype: numpy.ndarray
"""
error = observed - modeled
return error | ef5aa10fbe25689c1197c1ce7a54401be020de1e | 10,574 |
from tempfile import mkdtemp
from typing import Optional
def prepare_tempdir(suffix: Optional[str] = None) -> str:
    """Prepares a temporary directory and returns the path to it.
    f"_{suffix}" will be used as the suffix of this directory, if provided.
    """
suffix = "_" + suffix if suffix else None
dir_str = mkdtemp(suffix, "warsawgtfs_")
return dir_str | fcc562240c8bbe3211d460d0888342fe422ef27a | 10,575 |
def parse_number(string):
"""
Retrieve a number from the string.
Parameters
----------
string : str
the string to parse
Returns
-------
number : float
the number contained in the string
"""
num_str = string.split(None, 1)[0]
number = float(num_str)
return number | e3833873311ec142edcd2ba0301894bb000dff78 | 10,576 |
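A minimal usage check (added for illustration), assuming parse_number is defined as above:

print(parse_number("3.5 kg"))  # 3.5
print(parse_number("42"))      # 42.0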
def catch_exception(func):
"""
Decorator that catches exception and exits the code if needed
"""
def exit_if_failed(*args, **kwargs):
try:
result = func(*args, **kwargs)
except (NonZeroExitCodeException, GitLogParsingException) as exception:
Logger.error(exception.message)
quit()
else:
return result
return exit_if_failed | 17075b508d490d9623b047bc854a0dfe654fd255 | 10,577 |
def _coord_byval(coord):
"""
Turns a COORD object into a c_long.
This will cause it to be passed by value instead of by reference. (That is what I think at least.)
When runing ``ptipython`` is run (only with IPython), we often got the following error::
Error in 'SetConsoleCursorPosition'.
ArgumentError("argument 2: <class 'TypeError'>: wrong type",)
argument 2: <class 'TypeError'>: wrong type
It was solved by turning ``COORD`` parameters into a ``c_long`` like this.
More info: http://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
"""
return c_long(coord.Y * 0x10000 | coord.X & 0xFFFF) | baf2dd26e5e9307074fc04a6870353206e49c291 | 10,578 |
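The packing arithmetic can be verified without ctypes; a small sketch with hypothetical coordinate values:

y, x = 3, 5  # hypothetical COORD fields
packed = y * 0x10000 | x & 0xFFFF
print(hex(packed))  # 0x30005 -> Y ends up in the high word, X in the low word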
from typing import Union
from typing import Dict
def _list_flows(output_format='dict', **kwargs) -> Union[Dict, pd.DataFrame]:
"""
Perform the api call that return a list of all flows.
Parameters
----------
output_format: str, optional (default='dict')
The parameter decides the format of the output.
- If 'dict' the output is a dict of dict
- If 'dataframe' the output is a pandas DataFrame
kwargs: dict, optional
Legal filter operators: uploader, tag, limit, offset.
Returns
-------
flows : dict, or dataframe
"""
api_call = "flow/list"
if kwargs is not None:
for operator, value in kwargs.items():
api_call += "/%s/%s" % (operator, value)
return __list_flows(api_call=api_call, output_format=output_format) | 67d7e0d2a75cc1f6dbeb66af354282cff0983be0 | 10,579 |
def _take_many_sparse_from_tensors_map(sparse_map_op,
sparse_handles,
rank=None,
name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse.reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError(
"sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops.take_many_sparse_from_tensors_map(
sparse_handles,
dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name,
name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape) | ab9a646f573823de300bd78d0fb5ff7d23311445 | 10,580 |
def rts_smooth(kalman_filter, state_count=None):
"""
Compute the Rauch-Tung-Striebel smoothed state estimates and estimate
covariances for a Kalman filter.
Args:
kalman_filter (KalmanFilter): Filter whose smoothed states should be
returned
state_count (int or None): Number of smoothed states to return.
If None, use ``kalman_filter.state_count``.
Returns:
(list of MultivariateNormal): List of multivariate normal distributions.
The mean of the distribution is the estimated state and the covariance
is the covariance of the estimate.
"""
if state_count is None:
state_count = kalman_filter.state_count
state_count = int(state_count)
if state_count < 0 or state_count > kalman_filter.state_count:
raise ValueError("Invalid state count: {}".format(state_count))
# No states to return?
if state_count == 0:
return []
# Initialise with final posterior estimate
states = [None] * state_count
states[-1] = kalman_filter.posterior_state_estimates[-1]
priors = kalman_filter.prior_state_estimates
posteriors = kalman_filter.posterior_state_estimates
# Work backwards from final state
for k in range(state_count-2, -1, -1):
process_mat = kalman_filter.process_matrices[k+1]
cmat = posteriors[k].cov.dot(process_mat.T).dot(
np.linalg.inv(priors[k+1].cov))
# Calculate smoothed state and covariance
states[k] = MultivariateNormal(
mean=posteriors[k].mean + cmat.dot(states[k+1].mean -
priors[k+1].mean),
cov=posteriors[k].cov + cmat.dot(states[k+1].cov -
priors[k+1].cov).dot(cmat.T)
)
return states | 6b3df9f2015c525a385114b657954dba1ec731a2 | 10,581 |
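The backward loop implements the standard Rauch-Tung-Striebel recursion; in the usual notation (added here for reference), with $F_{k+1}$ corresponding to process_matrices[k+1] and the prior/posterior estimates to prior_state_estimates/posterior_state_estimates:

$$C_k = P_{k|k} F_{k+1}^{\top} P_{k+1|k}^{-1}$$
$$\hat{x}_{k|N} = \hat{x}_{k|k} + C_k (\hat{x}_{k+1|N} - \hat{x}_{k+1|k})$$
$$P_{k|N} = P_{k|k} + C_k (P_{k+1|N} - P_{k+1|k}) C_k^{\top}$$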
def play_episode(args, sess, env, qnet, e):
"""
Actually plays a single game and performs updates once we have enough
experiences.
:param args: parser.parse_args
:param sess: tf.Session()
:param env: gym.make()
:param qnet: class which holds the NN to play and update.
:param e: chance of a random action selection.
:return: reward earned in the game, update value of e, transitions updated
against.
"""
done = False
_ = env.reset()
reward = 0 # total reward for this episode
turn = 0
transitions = 0 # updates * batch_size
terminal = True # Anytime we lose a life, and beginning of episode.
while not done:
if terminal:
terminal = False
# To make sure that the agent doesn't just learn to set up well for
# the way the game starts, begin the game by not doing anything and
# letting the ball move.
for _ in range(np.random.randint(1, args.random_starts)):
# Perform random actions at the beginning so the network doesn't
# just learn a sequence of steps to always take.
img, _, done, info = env.step(env.action_space.sample())
img = preprocess_img(img)
state = np.stack((img, img, img, img), axis=2)
lives = info['ale.lives']
if done:
# If lost our last life during random_start, nothing left to play
break
# Perform an action
action = qnet.predict(sess, np.array([state]))[0]
if np.random.rand(1) < e:
action = qnet.rand_action()
img, r, done, info = env.step(action)
# Store as an experience
img = np.reshape(preprocess_img(img), (85, 80, 1))
next_state = np.concatenate((state[:, :, 1:], img), axis=2)
if info['ale.lives'] < lives:
terminal = True
qnet.add_experience(state, action, r, next_state, terminal)
# Updates
if qnet.exp_buf_size() > args.begin_updates and\
turn % (qnet.batch_size // 8) == 0:
# Once we have enough experiences in the buffer we can
# start learning. We want to use each experience on average 8 times
# so that's why for a batch size of 8 we would update every turn.
qnet.update(sess)
transitions += qnet.batch_size
if e > args.e_f:
# Reduce once for every update on 8 states. This makes e
# not dependent on the batch_size.
e -= (qnet.batch_size*(args.e_i - args.e_f)) / args.e_anneal
# Prep for the next turn
state = next_state
reward += r
turn += 1
return reward, e, transitions | 2a1c257a1c03dc71e1b7f5a4a102eb5ab080fa49 | 10,582 |
def to_categorical(y, num_columns):
"""Returns one-hot encoded Variable"""
y_cat = np.zeros((y.shape[0], num_columns))
y_cat[range(y.shape[0]), y.astype(int)] = 1.0
return Variable(FloatTensor(y_cat)) | 29918d01c011daec2f87d5ee0075899ed9c366d8 | 10,583 |
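A worked example of the same one-hot construction using plain numpy (Variable and FloatTensor appear to come from an older PyTorch API and are assumed to be in scope above):

import numpy as np

y = np.array([0.0, 2.0, 1.0])
y_cat = np.zeros((y.shape[0], 3))
y_cat[range(y.shape[0]), y.astype(int)] = 1.0
print(y_cat)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]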
def extract_light_positions_for_rays(ray_batch, scene_info, light_pos):
"""Extract light positions for a batch of rays.
Args:
ray_batch: [R, M] tf.float32.
scene_info: Dict.
light_pos: Light position.
Returns:
light_positions: [R, 3] tf.float32.
"""
ray_sids = extract_slice_from_ray_batch( # [R,]
ray_batch=ray_batch, key='metadata')
light_positions = scene_utils.extract_light_positions_for_sids(
sids=ray_sids, # [R, 3]
scene_info=scene_info,
light_pos=light_pos)
return light_positions | 490afd9b98dfb6f28a67e32be38f1dda9486ee63 | 10,585 |
def cdf(x, c, loc=0, scale=1):
"""Return the cdf
:param x:
:type x:
:param c:
:type c:
:param loc:
:type loc:
:param scale:
:type scale:
:return:
:rtype:
"""
x = (x - loc) / scale
try:
c = round(c, 15)
x = np.log(1 - c * x) / c
return 1.0 / (1 + np.exp(x))
except ZeroDivisionError:
return 1.0 / (1 + np.exp(-x)) | 22ed6c2972c0cfc6c096939b630620555f9f0c3d | 10,586 |
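A quick numeric sanity check (added for illustration), assuming numpy as np and cdf as defined above: at x = loc the transformed value is log(1)/c = 0, so the result is 0.5 for any shape c.

print(cdf(0.0, c=0.5))  # 0.5, since log(1 - 0.5*0)/0.5 == 0
print(cdf(1.0, c=0.5))  # ~0.8, since exp(log(0.5)/0.5) == 0.25 and 1/(1 + 0.25) == 0.8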
import json
import yaml
def load_dict(file_name):
"""
Reads JSON or YAML file into a dictionary
"""
if file_name.lower().endswith(".json"):
with open(file_name) as _f:
return json.load(_f)
with open(file_name) as _f:
return yaml.full_load(_f) | a098a8582e22fba2c9c2b72fbf3e3f769f740a98 | 10,588 |
import requests
import re
def get_next_number(num, proxies='', auth=''):
"""
Returns the next number in the chain
"""
url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php'
res = requests.get('{0}?nothing={1}'.format(url, num), proxies=proxies, auth=auth)
dat = res.content
pattern = re.compile(r'next nothing is (\d+)')
match = pattern.findall(dat)
if match:
        return get_next_number(match[0], proxies=proxies, auth=auth)
else:
if "Divide" in dat:
            return get_next_number(int(num)/2, proxies=proxies, auth=auth)
else:
return dat | 74fe86908363e6272452d5b08dfdf5745af99758 | 10,589 |
import io
import gzip
import json
def gzip_str(g_str):
"""
    JSON-serialize the input and compress it with gzip
    Args:
        g_str: data to serialize and compress (any JSON-serializable object)
    Returns:
        gzip-compressed bytes
"""
compressed_str = io.BytesIO()
with gzip.GzipFile(fileobj=compressed_str, mode="w") as file_out:
file_out.write((json.dumps(g_str).encode()))
bytes_obj = compressed_str.getvalue()
return bytes_obj | 05d503e7b4a4ad69b6951146c787486048bc6f7e | 10,590 |
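A round-trip sketch (added for illustration, assuming gzip_str is defined as above) showing that the payload decompresses back to the original object:

import gzip
import io
import json

blob = gzip_str({"key": "value"})
with gzip.GzipFile(fileobj=io.BytesIO(blob), mode="r") as file_in:
    print(json.loads(file_in.read().decode()))  # {'key': 'value'}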
import _locale
def get_summoner_spells():
"""
https://developer.riotgames.com/api/methods#!/968/3327
Returns:
SummonerSpellList: all the summoner spells
"""
request = "{version}/summoner-spells".format(version=cassiopeia.dto.requests.api_versions["staticdata"])
params = {"tags": "all"}
if _locale:
params["locale"] = _locale
return cassiopeia.type.dto.staticdata.SummonerSpellList(cassiopeia.dto.requests.get(request, params, True)) | 7667c97c474578fbe771bdeaccd1d6b4f0a2d5d0 | 10,591 |
def solve_covariance(u) -> np.ndarray:
"""Solve covariance matrix from moments
Parameters
----------
u : List[np.ndarray]
List of moments as defined by the ``get_moments()`` method call
of a BayesPy node object.
"""
cov = u[1] - np.outer(u[0], u[0])
return cov if cov.shape != (1, 1) else np.array(cov.sum()) | 234698e7ae1baf2d471791888e2b488c59cd5e88 | 10,594 |
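A worked check (added for illustration), assuming numpy as np and solve_covariance as defined above: with moments u = [E[x], E[x x^T]], the covariance E[x x^T] - E[x] E[x]^T is recovered exactly.

import numpy as np

mean = np.array([1.0, 2.0])
true_cov = np.array([[2.0, 0.5], [0.5, 1.0]])
u = [mean, true_cov + np.outer(mean, mean)]  # hypothetical moments
print(solve_covariance(u))
# [[2.  0.5]
#  [0.5 1. ]]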
def interact_ids(*columns: Array) -> Array:
"""Create interactions of ID columns."""
interacted = columns[0].flatten().astype(np.object)
if len(columns) > 1:
interacted[:] = list(zip(*columns))
return interacted | 6c0657723097b472e03abaf9d784fedf447463bc | 10,595 |
def _get_depthwise():
"""
We ask the user to input a value for depthwise.
Depthwise is an integer hyperparameter that is used in the
mobilenet-like model. Please refer to famous_cnn submodule
or to mobilenets paper
# Default: 1
"""
    pooling = ''
    while pooling not in ['avg', 'max']:
demand = "Please choose a value for pooling argument that is `avg`\
or `max`\n"
pooling = str(get_input(demand))
return pooling | c102b534b4e9143917fde369670f9aaaad321f7b | 10,596 |
def LoadPartitionConfig(filename):
"""Loads a partition tables configuration file into a Python object.
Args:
filename: Filename to load into object
Returns:
Object containing disk layout configuration
"""
valid_keys = set(('_comment', 'metadata', 'layouts', 'parent'))
valid_layout_keys = set((
'_comment', 'num', 'blocks', 'block_size', 'fs_blocks', 'fs_block_size',
'uuid', 'label', 'format', 'fs_format', 'type', 'features',
'size', 'fs_size', 'fs_options', 'erase_block_size', 'hybrid_mbr',
'reserved_erase_blocks', 'max_bad_erase_blocks', 'external_gpt',
'page_size', 'size_min', 'fs_size_min'))
valid_features = set(('expand',))
config = _LoadStackedPartitionConfig(filename)
try:
metadata = config['metadata']
for key in ('block_size', 'fs_block_size'):
metadata[key] = ParseHumanNumber(metadata[key])
unknown_keys = set(config.keys()) - valid_keys
if unknown_keys:
raise InvalidLayout('Unknown items: %r' % unknown_keys)
if len(config['layouts']) <= 0:
raise InvalidLayout('Missing "layouts" entries')
if not BASE_LAYOUT in config['layouts'].keys():
raise InvalidLayout('Missing "base" config in "layouts"')
for layout_name, layout in config['layouts'].iteritems():
if layout_name == '_comment':
continue
for part in layout:
unknown_keys = set(part.keys()) - valid_layout_keys
if unknown_keys:
raise InvalidLayout('Unknown items in layout %s: %r' %
(layout_name, unknown_keys))
if part.get('num') == 'metadata' and 'type' not in part:
part['type'] = 'blank'
if part['type'] != 'blank':
for s in ('num', 'label'):
if not s in part:
raise InvalidLayout('Layout "%s" missing "%s"' % (layout_name, s))
if 'size' in part:
if 'blocks' in part:
raise ConflictingOptions(
'%s: Conflicting settings are used. '
'Found section sets both \'blocks\' and \'size\'.' %
part['label'])
part['bytes'] = ParseHumanNumber(part['size'])
if 'size_min' in part:
size_min = ParseHumanNumber(part['size_min'])
if part['bytes'] < size_min:
part['bytes'] = size_min
part['blocks'] = part['bytes'] / metadata['block_size']
if part['bytes'] % metadata['block_size'] != 0:
raise InvalidSize(
'Size: "%s" (%s bytes) is not an even number of block_size: %s'
% (part['size'], part['bytes'], metadata['block_size']))
if 'fs_size' in part:
part['fs_bytes'] = ParseHumanNumber(part['fs_size'])
if 'fs_size_min' in part:
fs_size_min = ParseHumanNumber(part['fs_size_min'])
if part['fs_bytes'] < fs_size_min:
part['fs_bytes'] = fs_size_min
if part['fs_bytes'] <= 0:
raise InvalidSize(
'File system size "%s" must be positive' %
part['fs_size'])
if part['fs_bytes'] > part['bytes']:
raise InvalidSize(
'Filesystem may not be larger than partition: %s %s: %d > %d' %
(layout_name, part['label'], part['fs_bytes'], part['bytes']))
if part['fs_bytes'] % metadata['fs_block_size'] != 0:
raise InvalidSize(
'File system size: "%s" (%s bytes) is not an even number of '
'fs blocks: %s' %
(part['fs_size'], part['fs_bytes'], metadata['fs_block_size']))
if part.get('format') == 'ubi':
part_meta = GetMetadataPartition(layout)
page_size = ParseHumanNumber(part_meta['page_size'])
eb_size = ParseHumanNumber(part_meta['erase_block_size'])
ubi_eb_size = eb_size - 2 * page_size
if (part['fs_bytes'] % ubi_eb_size) != 0:
# Trim fs_bytes to multiple of UBI eraseblock size.
fs_bytes = part['fs_bytes'] - (part['fs_bytes'] % ubi_eb_size)
raise InvalidSize(
'File system size: "%s" (%d bytes) is not a multiple of UBI '
'erase block size (%d). Please set "fs_size" to "%s" in the '
'"common" layout instead.' %
(part['fs_size'], part['fs_bytes'], ubi_eb_size,
ProduceHumanNumber(fs_bytes)))
if 'blocks' in part:
part['blocks'] = ParseHumanNumber(part['blocks'])
part['bytes'] = part['blocks'] * metadata['block_size']
if 'fs_blocks' in part:
max_fs_blocks = part['bytes'] / metadata['fs_block_size']
part['fs_blocks'] = ParseRelativeNumber(max_fs_blocks,
part['fs_blocks'])
part['fs_bytes'] = part['fs_blocks'] * metadata['fs_block_size']
if part['fs_bytes'] > part['bytes']:
raise InvalidLayout(
'Filesystem may not be larger than partition: %s %s: %d > %d' %
(layout_name, part['label'], part['fs_bytes'], part['bytes']))
if 'erase_block_size' in part:
part['erase_block_size'] = ParseHumanNumber(part['erase_block_size'])
if 'page_size' in part:
part['page_size'] = ParseHumanNumber(part['page_size'])
part.setdefault('features', [])
unknown_features = set(part['features']) - valid_features
if unknown_features:
raise InvalidLayout('%s: Unknown features: %s' %
(part['label'], unknown_features))
except KeyError as e:
raise InvalidLayout('Layout is missing required entries: %s' % e)
return config | dab5fe288044587c9cc32ad9c6811ea0c79ea75a | 10,597 |
def get_own():
"""Returns the instance on which the caller is running.
Returns a boto.ec2.instance.Instance object augmented by tag attributes.
IMPORTANT: This method will raise an exception if the network
fails. Don't forget to catch it early because we must recover from
this, fast. Also, it will throw an exception if you are not running
under EC2, so it is preferable to use is_running_on_ec2 before calling
this method.
"""
try:
instance_id = _query("instance-id")
if not instance_id:
raise NoEc2Instance(
"Can't find own instance id. Are you running under EC2?")
return filter(lambda i: i.id == instance_id, all())[0]
except EC2ResponseError:
raise NoEc2Instance("Cannot find instance %r" % instance_id) | 056f47a47036fc1ba0f778d81ce8234dfc122a8f | 10,598 |
from datetime import datetime
from typing import List
from typing import Mapping
def create_report(
name: str,
description: str,
published: datetime,
author: Identity,
object_refs: List[_DomainObject],
external_references: List[ExternalReference],
object_marking_refs: List[MarkingDefinition],
report_status: int,
report_type: str,
confidence_level: int,
labels: List[str],
files: List[Mapping[str, str]],
) -> STIXReport:
"""Create a report."""
return STIXReport(
created_by_ref=author,
name=name,
description=description,
published=published,
object_refs=object_refs,
labels=labels,
external_references=external_references,
object_marking_refs=object_marking_refs,
confidence=confidence_level,
report_types=[report_type],
custom_properties={
"x_opencti_report_status": report_status,
"x_opencti_files": files,
},
) | 81331d99b95540a9d7b447cf50a996965d9e2a10 | 10,599 |
def get_curve_points(
road: Road,
center: np.ndarray,
road_end: np.ndarray,
placement_offset: float,
is_end: bool,
) -> list[np.ndarray]:
"""
:param road: road segment
:param center: road intersection point
:param road_end: end point of the road segment
:param placement_offset: offset based on placement tag value
:param is_end: whether the point represents road end
"""
width: float = road.width / 2.0 * road.scale
direction: np.ndarray = (center - road_end) / np.linalg.norm(
center - road_end
)
if is_end:
direction = -direction
left: np.ndarray = turn_by_angle(direction, np.pi / 2.0) * (
width + placement_offset
)
right: np.ndarray = turn_by_angle(direction, -np.pi / 2.0) * (
width - placement_offset
)
return [road_end + left, center + left, center + right, road_end + right] | 9bc22c1894332a70e904d4c543606c3f38606064 | 10,600 |
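turn_by_angle is an external helper; a hypothetical minimal implementation consistent with how it is used above (counter-clockwise rotation of a 2D vector), not the original project's code:

import numpy as np

def turn_by_angle(vector: np.ndarray, angle: float) -> np.ndarray:
    """Rotate a 2D vector counter-clockwise by `angle` radians (hypothetical sketch)."""
    cos, sin = np.cos(angle), np.sin(angle)
    return np.array([cos * vector[0] - sin * vector[1],
                     sin * vector[0] + cos * vector[1]])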
def default_inputs_folder_at_judge(receiver):
"""
When a receiver is added to a task and `receiver.send_to_judge` is checked,
this function will be used to automatically set the name of the folder with inputs at judge server.
When this function is called SubmitReceiver object is created but is not saved in database yet.
"""
return '{}-{}'.format(submit_settings.JUDGE_INTERFACE_IDENTITY, receiver.id) | 0f45a374a32feb19ffe17d394831123ca8af68c8 | 10,601 |
def remove_extension(string):
""" Removes the extention from a string, as well as the directories.
This function may fail if more than one . is in the file, such as ".tar.gz"
Args:
string: (string): either a path or a filename that for a specific file, with extension.
(e.g. /usr/dir/sample.mitograph or sample.mitograph)
Returns:
filename_without_extension (str): just the filename without the extension (e.g. "sample")
"""
# Remove all enclosing directories, only get the name of file.
cur_filename_with_extension = remove_enclosing_dirs(string)
# Remove the extension by splitting the string at each "." and only taking first part.
filename_without_extension = cur_filename_with_extension.split(".")[0]
return filename_without_extension | 8bdd3818696745c5955dfb5bd7725d87e1284103 | 10,602 |
def _theme_static(path):
"""
Serve arbitrary files.
"""
return static_app.static(path, 'theme') | c93815b041632c313961afbe7ef254117c4259de | 10,603 |
def create_link(link: schemas.Link, db: Session = Depends(get_db)):
"""Create link
"""
# Check if the target already exists
db_link = crud.get_link_by_target(db=db, target=link.target)
if db_link:
raise HTTPException(status_code=400, detail="link already registered")
response = crud.create_link(db=db, link=link)
payload = {'link': response.link}
return JSONResponse(content=payload) | b220403b6d054df0f0e6f0538573038b0f7895b3 | 10,604 |
def encode_rsa_public_key(key):
"""
Encode an RSA public key into PKCS#1 DER-encoded format.
:param PublicKey key: RSA public key
:rtype: bytes
"""
return RSAPublicKey({
'modulus': int.from_bytes(key[Attribute.MODULUS], byteorder='big'),
'public_exponent': int.from_bytes(key[Attribute.PUBLIC_EXPONENT],
byteorder='big'),
}).dump() | 38b1c3b4ee361415fa8587df7dbfdd94d00fdbe1 | 10,605 |
def is_block_valid(new_block, old_block):
"""
    Simply verify whether the block is valid.
"""
if old_block["Index"] + 1 != new_block["Index"]:
return False
if old_block["Hash"] != new_block["PrevHash"]:
return False
if caculate_hash(new_block) != new_block["Hash"]:
return False
return True | 8447ca7b7bbb75748601d4a79d97047ad7ef07ab | 10,606 |
import urllib
def add_get_parameter(url, key, value):
"""
Utility method to add an HTTP request parameter to a GET request
"""
if '?' in url:
return url + "&%s" % urllib.urlencode([(key, value)])
else:
return url + "?%s" % urllib.urlencode([(key, value)]) | 640de1f111ff9080386f855e220e8eaaad113a0a | 10,607 |
def get_simple_match(text):
"""Returns a word instance in the dictionary, selected by a simplified String match"""
# Try to find a matching word
try:
result = word.get(word.normalized_text == text)
return result
except peewee.DoesNotExist:
return None | e8365b6129e452eb17696daf8638a573e8d0cb4b | 10,608 |
def ipv_plot_df(points_df, sample_frac=1, marker='circle_2d', size=0.2, **kwargs):
"""Plot vertices in a dataframe using ipyvolume."""
if sample_frac < 1:
xyz = random_sample(points_df, len(points_df), sample_frac)
else:
xyz = dict(x=points_df['x'].values, y=points_df['y'].values, z=points_df['z'].values)
fig = ipv.scatter(**xyz, marker=marker, size=size, **kwargs)
return fig | a957629bcf7b9acbff314f243a3cae9803bda69d | 10,609 |
def admin_login():
"""
This function is used to show the admin login page
:return: admin_login.html
"""
return render_template("admin_login.html") | 495841f7cb352a07d8214f99b99ca8be7179839f | 10,611 |