content | sha1 | id
---|---|---|
def german_weekday_name(date):
"""Return the german weekday name for a given date."""
days = [u'Montag', u'Dienstag', u'Mittwoch', u'Donnerstag', u'Freitag', u'Samstag', u'Sonntag']
return days[date.weekday()] | 7d2919c61438ec913abe38cccd924bb69f866655 | 5,578 |
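A quick usage sketch: date.weekday() returns 0 for Monday, so the list above lines up with Python's convention.
import datetime
# 2024-01-01 was a Monday, so weekday() == 0 maps to 'Montag'
print(german_weekday_name(datetime.date(2024, 1, 1)))  # Montag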
import pandas as pd
from sqlalchemy import create_engine
def load_data(database_filepath):
"""
Input:
database_filepath - path of the cleaned data file
Output:
X and Y for model training
Category names
"""
# load data from database
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = pd.read_sql("SELECT * FROM df_clean", engine)
X = df['message']
Y = df.iloc[0:, 4:]
category_names = Y.columns
return X, Y, category_names | 8647722a0b97a8130bfadfa6dec45fb71c9e6fe3 | 5,579 |
import numpy as np
from scipy.special import lpmn, factorial
def real_spherical_harmonics(phi, theta, l, m):
    """Real spherical harmonics, also known as tesseral spherical harmonics,
    with the Condon-Shortley phase.
    Only for scalar phi and theta!
"""
if m == 0:
y = np.sqrt(
(2 * l + 1) / (4 * np.pi)
) * lpmn(m, l, np.cos(theta))[0][-1][-1]
elif m < 0:
y = (-1)**m * np.sqrt(2) * np.sqrt(
(2 * l + 1) / (4 * np.pi) * \
factorial(l - np.abs(m)) / factorial(l + np.abs(m))
) * lpmn(np.abs(m), l, np.cos(theta))[0][-1][-1] * np.sin(np.abs(m) * phi)
elif m > 0:
y = (-1)**m * np.sqrt(2) * np.sqrt(
(2 * l + 1) / (4 * np.pi) * \
factorial(l - np.abs(m)) / factorial(l + np.abs(m))
) * lpmn(np.abs(m), l, np.cos(theta))[0][-1][-1] * np.cos(np.abs(m) * phi)
return y | 5c12cf5263676fccc2dee40c54670ea5150e2cfc | 5,580 |
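A sanity check against the closed form Y_1^0(theta) = sqrt(3/(4*pi)) * cos(theta); lpmn(0, 1, x)[0][-1][-1] evaluates the associated Legendre function P_1^0(x) = x.
import numpy as np
theta = 0.7
expected = np.sqrt(3.0 / (4.0 * np.pi)) * np.cos(theta)
# the m == 0 branch reduces to the normalization factor times P_l^0(cos(theta))
assert np.isclose(real_spherical_harmonics(0.0, theta, l=1, m=0), expected)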
from typing import Callable
import distend.modifier
def get_replace_function(replace_multiple: bool) -> Callable:
"""given bool:replace_multiple flag,
return replace function from modifier
"""
if replace_multiple:
return distend.modifier.replace_multiple
else:
return distend.modifier.replace_single | 6bb05bb4dd8b28f8581e576aa0f086b55eb7cae6 | 5,581 |
def accuracy(X, Y, w):
    """
    Evaluate the accuracy of classifier weights w on data X with labels Y.
    """
n_correct = 0
for i in range(len(X)):
if predict(w, X[i]) == Y[i]:
n_correct += 1
return n_correct * 1.0 / len(X) | bdc68859ec7d1f011dc04f641565e44aaeffe908 | 5,582 |
from math import log2
from typing import List
import numpy as np
def reduce_matrix(indices_to_remove: List[int], matrix: np.ndarray) -> np.ndarray:
"""
    Removes the bit positions listed in indices_to_remove from the binary indexing
    of matrix, producing a new, smaller transition matrix.
    To do so, it keeps the transition probabilities of the remaining bit positions
    while fixing each removed bit to state 0. This assumption on the noise is made
    because unmeasured qubits are likely to be in that state.
:param indices_to_remove: Binary index of state matrix is mapping to be removed.
:type indices_to_remove: List[int]
:param matrix: Transition matrix where indices correspond to some binary state, to have some
dimension removed.
:type matrix: np.ndarray
:return: Transition matrix with removed entries.
:rtype: np.ndarray
"""
new_n_qubits = int(log2(matrix.shape[0])) - len(indices_to_remove)
if new_n_qubits == 0:
return np.array([])
bin_map = dict()
mat_dim = 1 << new_n_qubits
for index in range(mat_dim):
# get current binary
bina = list(int_to_binary(index, new_n_qubits))
# add 0's to fetch old binary to set values from
for i in sorted(indices_to_remove):
bina.insert(i, 0)
# get index of values
bin_map[index] = binary_to_int(tuple(bina))
new_mat = np.zeros((mat_dim,) * 2, dtype=float)
for i in range(len(new_mat)):
old_row_index = bin_map[i]
for j in range(len(new_mat)):
old_col_index = bin_map[j]
new_mat[i, j] = matrix[old_row_index, old_col_index]
return new_mat | dac7755b63593044a7df1658d3205572a935e64d | 5,583 |
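A minimal sketch of the external binary helpers this relies on, assuming big-endian bit tuples (the real project may order bits differently), followed by a 2-qubit to 1-qubit reduction:
import numpy as np
from math import log2

def int_to_binary(index, n_bits):
    # hypothetical stand-in: integer -> big-endian tuple of bits
    return tuple(int(b) for b in format(index, "0{}b".format(n_bits)))

def binary_to_int(bits):
    # hypothetical stand-in: tuple of bits -> integer
    return int("".join(str(b) for b in bits), 2)

# dropping bit position 0 from a 4x4 identity keeps the 2x2 identity,
# because each new index fixes the removed qubit in state 0
print(reduce_matrix([0], np.eye(4)))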
def kdj(df, n=9):
"""
随机指标KDJ
N日RSV=(第N日收盘价-N日内最低价)/(N日内最高价-N日内最低价)×100%
当日K值=2/3前1日K值+1/3×当日RSV=SMA(RSV,M1)
当日D值=2/3前1日D值+1/3×当日K= SMA(K,M2)
当日J值=3 ×当日K值-2×当日D值
"""
_kdj = pd.DataFrame()
_kdj['date'] = df['date']
rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100
_kdj['k'] = sma(rsv, 3)
_kdj['d'] = sma(_kdj.k, 3)
_kdj['j'] = 3 * _kdj.k - 2 * _kdj.d
return _kdj | 7aa88cd6ee972063a2bd45b1b5b83da0255b336c | 5,584 |
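A minimal sketch of the sma() used above, assuming the Chinese-TA definition SMA(X, N, M=1): y[t] = (x[t] + (N - 1) * y[t-1]) / N, which for N=3 is exactly the 2/3-previous + 1/3-current recursion in the docstring.
import pandas as pd

def sma(series, n, m=1):
    # an exponentially weighted mean with alpha = m/n reproduces the recursion
    return pd.Series(series).ewm(alpha=m / n, adjust=False).mean()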
def identity_func(x):
"""The identify (a.k.a. transparent) function that returns it's input as is."""
return x | 06e0296c338d68663aa87d08b21f84919be3f85e | 5,585 |
def make_choice_validator(
choices, default_key=None, normalizer=None):
"""
Returns a callable that accepts the choices provided.
Choices should be provided as a list of 2-tuples, where the first
element is a string that should match user input (the key); the
second being the value associated with the key.
The callable by default will match, upon complete match the first
value associated with the result will be returned. Partial matches
are supported.
If a default is provided, that value will be returned if the user
provided input is empty, i.e. the value that is mapped to the empty
string.
Finally, a normalizer function can be passed. This normalizes all
keys and validation value.
"""
    def normalize_all(_choices):
        # normalize all the keys for easier comparison
        if normalizer:
            _choices = [(normalizer(key), value) for key, value in _choices]
        return _choices
choices = normalize_all(choices)
def choice_validator(value):
if normalizer:
value = normalizer(value)
if not value and default_key:
value = choices[default_key][0]
results = []
for choice, mapped in choices:
if value == choice:
return mapped
if choice.startswith(value):
results.append((choice, mapped))
if len(results) == 1:
return results[0][1]
elif not results:
raise ValueError('Invalid choice.')
else:
raise ValueError(
'Choice ambiguous between (%s)' % ', '.join(
k for k, v in normalize_all(results))
)
return choice_validator | 65ac672f16a1031a9051bc4f6769c6b1b88db727 | 5,586 |
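A usage sketch showing complete matches, unique partial matches and the ambiguity error:
validator = make_choice_validator([('yes', True), ('no', False)])
print(validator('yes'))  # True  (complete match)
print(validator('n'))    # False (unique partial match)
try:
    make_choice_validator([('back', 1), ('backup', 2)])('ba')
except ValueError as e:
    print(e)  # Choice ambiguous between (back, backup)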
import time
def find_best_polycomp_parameters(samples, num_of_coefficients_range,
samples_per_chunk_range, max_error,
algorithm, delta_coeffs=1, delta_samples=1,
period=None, callback=None, max_iterations=0):
"""Performs an optimized search of the best configuration in the
parameter space given by "num_of_coefficients_space" and
"samples_per_chunk_space"."""
    optimization_start_time = time.perf_counter()
x_range = num_of_coefficients_range
y_range = samples_per_chunk_range
midpoint_x, midpoint_y = [int(np.mean(k)) for k in (x_range, y_range)]
param_points = PointCache(samples=samples,
max_allowable_error=max_error,
algorithm=algorithm,
period=period)
# The logic of this code is the following:
#
# 1. Start from a point (x, y)
# 2. Sample the point and all its neighbours
# 3. Move to the best point among the nine that have been sampled
# 4. Repeat from point 2. until the best point is the current one
#
    # Many points will be sampled more than once, but we use a
    # "PointCache" object to do all the sampling, so that only new
    # points need to be recalculated each time.
num_of_steps = 1
dx = delta_coeffs
dy = delta_samples
while True:
ring_of_points = [(-dx, -dy), (0, -dy), (dx, -dy),
(-dx, 0), (0, 0), (dx, 0),
(-dx, dy), (0, dy), (dx, dy)]
ring_of_configurations = []
        for ddx, ddy in ring_of_points:
            cur_x = midpoint_x + ddx
            cur_y = midpoint_y + ddy
if cur_x < x_range[0] or cur_x > x_range[1]:
continue
if cur_y < y_range[0] or cur_y > y_range[1]:
continue
chunks, params = param_points.get_point(cur_x, cur_y)
if callback is not None:
callback(cur_x, cur_y, params, num_of_steps)
ring_of_configurations.append((cur_x, cur_y, chunks, params))
ring_of_configurations.sort(key=lambda p: p[3].compr_data_size)
best_x, best_y, best_chunks, best_params = ring_of_configurations[0]
        # If we have run too many iterations, stop bothering and exit the loop
num_of_steps += 1
if (max_iterations > 0) and num_of_steps > max_iterations:
break
# If we're centered on the best value, let's explore a
# narrower space around it
if (best_x, best_y) == (midpoint_x, midpoint_y):
repeat = False
# Can the ring be shrunk any further? If so, shrink it and
# keep iterating
if (dx > 1) or (dy > 1):
# If dx == dy, we prefer to reduce dy first
if dy > dx:
dy = dy // 2
else:
dx = dx // 2
repeat = True
if repeat:
continue
else:
break
midpoint_x, midpoint_y = best_x, best_y
return (best_params,
list(param_points.parameter_space.values()),
num_of_steps) | 47f076634c50cc18c760b7c60909a2d63a19fd3e | 5,587 |
import numpy as np
def moving_average(data, window_size=100): # approach from https://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python
"""
Calculates a moving average for all the data
Args:
data: set of values
window_size: number of data points to consider in window
Returns:
Moving average of the data
"""
cumsum_vec = np.cumsum(np.insert(data, 0, 0))
ma_vec = (cumsum_vec[window_size:] - cumsum_vec[:-window_size]) / window_size
return ma_vec | 8f04d659081a68c4287024e2b6567f257f7b9d92 | 5,588 |
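The cumulative-sum trick computes each window sum as cumsum[i + w] - cumsum[i], so an n-point input yields n - w + 1 averages:
import numpy as np
print(moving_average(np.array([1, 2, 3, 4, 5]), window_size=3))  # [2. 3. 4.]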
import re
import numpy as np
def _change_TRAVDV_to_TRAVdashDV(s:str):
"""
Reconciles mixcr name like TRAV29/DV5*01 to tcrdist2 name TRAV29DV5*01
Parameters
----------
s : str
Examples
--------
>>> _change_TRAVDV_to_TRAVdashDV('TRAV29DV5*01')
'TRAV29/DV5*01'
>>> _change_TRAVDV_to_TRAVdashDV('TRAV38-2DV8*01')
'TRAV38-2/DV8*01'
>>> _change_TRAVDV_to_TRAVdashDV('TRDV*01')
'TRDV*01'
Notes
-----
This reconciles such gene names to match the tcrdist2 reference db.
see database for more details: repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv").all_genes
"""
    if isinstance(s, str):
        m = re.match(pattern = "(TRAV[0-9]+)(DV.*)", string = s)
        m2 = re.match(pattern = "(TRAV[0-9]+-[1-2])(DV.*)", string = s)
        if m:
            return "/".join(m.groups())
        elif m2:
            return "/".join(m2.groups())
        else:
            return s
    else:
        return np.nan | b5df8b51c96ca9695aecc0fcae4589f35b692331 | 5,589 |
def gen_event_type_entry_str(event_type_name, event_type, event_config):
"""
return string like:
{"cpu-cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES},
"""
return '{"%s", %s, %s},\n' % (event_type_name, event_type, event_config) | ca89c19b45f182b8a7ae74ab76f3f42bddf46811 | 5,590 |
def encode_rotate_authentication_key_script(new_key: bytes) -> Script:
"""# Summary
    Rotates the transaction sender's authentication key to the supplied new authentication key.
    May be sent by any account.
# Technical Description
Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`.
`new_key` must be a valid ed25519 public key, and `account` must not have previously delegated
its `DiemAccount::KeyRotationCapability`.
# Parameters
| Name | Type | Description |
| ------ | ------ | ------------- |
| `account` | `&signer` | Signer reference of the sending account of the transaction. |
| `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. |
# Common Abort Conditions
| Error Category | Error Reason | Description |
| ---------------- | -------------- | ------------- |
| `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. |
| `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. |
# Related Scripts
* `Script::rotate_authentication_key_with_nonce`
* `Script::rotate_authentication_key_with_nonce_admin`
* `Script::rotate_authentication_key_with_recovery_address`
"""
return Script(
code=ROTATE_AUTHENTICATION_KEY_CODE,
ty_args=[],
args=[TransactionArgument__U8Vector(value=new_key)],
) | 6235409d0232e29d42de22a7bec2285adfd0db38 | 5,591 |
def retrieval_visualizations(model, savefig=True):
"""
Plots incremental retrieval contexts and supports, as heatmaps, and prints recalled items.
**Required model attributes**:
- item_count: specifies number of items encoded into memory
- context: vector representing an internal contextual state
- experience: adding a new trace to the memory model
- activations: function returning item activations given a vector probe
- outcome_probabilities: function returning item supports given a set of activations
**Also** uses savefig: boolean deciding whether figures are saved (True) or displayed
"""
retrieval_contexts, retrieval_supports, recall = retrieval_states(model)
plot_states(retrieval_contexts, 'Retrieval Contexts', savefig=savefig)
plot_states(retrieval_supports, 'Supports For Each Item At Each Increment of Retrieval',
savefig=savefig)
return recall | 96c4534a5e3057fb1bfd15068eec8cc61767c01d | 5,592 |
from pathlib import Path
def get_force_charge() -> str:
"""
Gets the command object for the force charge command
Returns:
The command object as a json string
"""
force_charge = Path('force_charge.json').read_text()
return force_charge | c67277c62664419c3b4a19ae57ea6de027c60416 | 5,593 |
def prune_motifs(ts, sorted_dic_list, r):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param sorted_dic_list: list of motif dictionaries returned from the emd algorithm, ordered by relevance
:type sorted_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:return: list of dictionaries with the most relevant motifs
:rtype: list of dic
"""
pruned_motif_dic_list = [sorted_dic_list[0]]
first_center_ts = extract_ts_from_pointers(ts, sorted_dic_list[0]['center_ts_pointers'])
pruned_center_ts_list = [first_center_ts]
for motif_dic in sorted_dic_list[1:]:
cur_center_ts = extract_ts_from_pointers(ts, motif_dic['center_ts_pointers'])
dist_list = dtwdist.compute_dwt_dist_between_ts_and_list(cur_center_ts, pruned_center_ts_list, 2 * r)
dist_test_list = [dist <= 2 * r for dist in dist_list]
if sum(dist_test_list) == 0:
pruned_motif_dic_list.append(motif_dic)
pruned_center_ts_list.append(cur_center_ts)
return pruned_motif_dic_list | 4fef0a51da25503548f6df59e09705c731a7fc6c | 5,594 |
def xor_columns(col, parity):
""" XOR a column with the parity values from the state """
result = []
for i in range(len(col)):
result.append(col[i] ^ parity[i])
return result | 2eff4dbf3edf2b97410e7bef17c043a30b1f3aa8 | 5,595 |
def initiate_default_resource_metadata(aws_resource):
"""
:type aws_resource: BaseAWSObject
"""
if not isinstance(aws_resource, BaseAWSObject):
raise TypeError
try:
metadata = aws_resource.Metadata
if not isinstance(metadata, dict):
raise TypeError("`troposphere.BaseAWSObject.Metadata` is not a dict!")
except Exception as e:
if "is not a dict!" in str(e):
raise e
metadata = {}
metadata.setdefault(TROPOSPHERE_METADATA_FIELD_NAME, {})
aws_resource.Metadata = metadata
return metadata | 4a510dd5a69f2499b407396f34818c79eead7c6a | 5,596 |
def token_vault_single(chain, team_multisig, token, freeze_ends_at, token_vault_balances) -> Contract:
"""Another token vault deployment with a single customer."""
total = 1000
args = [
team_multisig,
freeze_ends_at,
token.address,
total,
0 # Disable the tap
]
contract, hash = chain.provider.deploy_contract('TokenVault', deploy_args=args)
return contract | b42857cb7becacde9d5638f18f6dd7625eabb182 | 5,597 |
import re
import numpy as np
from collections import OrderedDict
from astropy.table import Table, Column
def read_eep_track(fp, colnames=None):
""" read MIST eep tracks """
    # read lines
    with open(fp, "r") as f:
        s = f.readlines()
    # get info
    MIST_version = re.split(r"\s+", s[0].strip())[-1]
    MESA_revision = re.split(r"\s+", s[1].strip())[-1]
    Yinit, Zinit, FeH, aFe, vvcrit = re.split(r"\s+", s[4].strip())[1:]
    Yinit = float(Yinit)
    Zinit = float(Zinit)
    FeH = float(FeH)
    aFe = float(aFe)
    vvcrit = float(vvcrit)
    initial_mass, N_pts, N_EEP, N_col, phase, type_ = \
        re.split(r"\s+", s[7].strip())[1:]
    initial_mass = float(initial_mass)
    N_pts = int(N_pts)
    N_EEP = int(N_EEP)
    N_col = int(N_col)
    # get eep info
    EEPs = [int(_) for _ in re.split(r"\s+", s[8].strip())[2:]]
eep = np.arange(EEPs[0], EEPs[-1] + 1)
# add eep column
# _eep
t = Table.read(s[11:], format="ascii.commented_header")
t.add_column(Column(eep, "_eep"))
# _lgmass
t.add_column(Column(np.ones(len(t), )*np.log10(initial_mass), "_lgmass"))
# _lgage
t.add_column(Column(np.log10(t["star_age"].data), "_lgage"))
# _feh
t.add_column(Column(np.ones(len(t), ) * FeH, "_feh"))
# add meta info
meta = OrderedDict(
MIST_version=MIST_version,
MESA_revision=MESA_revision,
Yinit=Yinit,
Zinit=Zinit,
FeH=FeH,
aFe=aFe,
vvcrit=vvcrit,
initial_mass=initial_mass,
N_pts=N_pts,
N_EEP=N_EEP,
N_col=N_col,
phase=phase,
type_=type_,
EEPs=EEPs,
INTERP=("_INTERP" in fp)
)
t.meta = meta
if colnames is None:
return t
else:
        for colname in colnames:
            if colname not in t.colnames:
                raise AssertionError("{} not in track.colnames!!!".format(colname))
return t | 551c8e5ba05aec5f32d9184398427fb003db78ba | 5,599 |
from torch import nn
def create_model():
"""ResNet34 inspired analog model.
Returns:
nn.Modules: created model
"""
block_per_layers = (3, 4, 6, 3)
base_channel = 16
channel = (base_channel, 2*base_channel, 4*base_channel)
l0 = nn.Sequential(
nn.Conv2d(3, channel[0], kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(channel[0]),
nn.ReLU()
)
l1 = nn.Sequential(*concatenate_layer_blocks(channel[0], channel[0], block_per_layers[0],
first_layer=True))
l2 = nn.Sequential(*concatenate_layer_blocks(channel[0], channel[1], block_per_layers[1]))
l3 = nn.Sequential(*concatenate_layer_blocks(channel[1], channel[2], block_per_layers[2]))
l4 = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(channel[2], N_CLASSES)
)
return nn.Sequential(l0, l1, l2, l3, l4) | fa9157c457b31b8bd0c3ca0b5dac8f68734486ec | 5,601 |
from typing import Union
from typing import Optional
def kv_get(key: Union[str, bytes],
*,
namespace: Optional[str] = None) -> bytes:
"""Fetch the value of a binary key."""
if isinstance(key, str):
key = key.encode()
assert isinstance(key, bytes)
return global_state_client.kv_get(key, namespace) | 03be836a3d42f39f2b28b2f3ea557cdf39918bcd | 5,602 |
import numpy as np
def construct_model(data, local_settings, covariate_multipliers, covariate_data_spec):
"""Makes a Cascade model from EpiViz-AT settings and data.
Args:
data: An object with both ``age_specific_death_rate`` and ``locations``.
local_settings: A settings object from ``cascade_plan``.
covariate_multipliers (List[EpiVizCovariateMultiplier]): descriptions of
covariate multipliers.
covariate_data_spec (List[EpiVizCovariate]): the covariates themselves.
Some covariates aren't used by covariate multipliers but are
included to calculate hold outs.
Returns:
cascade.model.Model: The model to fit.
"""
ev_settings = local_settings.settings
parent_location_id = local_settings.parent_location_id
default_age_time = dict()
default_age_time["age"] = np.linspace(0, 100, 21)
default_age_time["time"] = np.linspace(1990, 2015, 6)
for kind in ["age", "time"]:
default_grid = getattr(ev_settings.model, f"default_{kind}_grid")
if default_grid is not None:
            default_age_time[kind] = np.sort(np.array(default_grid, dtype=float))
# Use this age and time when a smooth grid doesn't depend on age and time.
single_age = default_age_time["age"][:1]
single_time = [default_age_time["time"][len(default_age_time["time"]) // 2]]
single_age_time = (single_age, single_time)
nonzero_rates = [smooth.rate for smooth in ev_settings.rate]
children = list(data.locations.successors(parent_location_id))
model = Model(
nonzero_rates=nonzero_rates,
parent_location=parent_location_id,
child_location=children,
covariates=covariates_list(covariate_data_spec),
weights=None,
)
construct_model_rates(default_age_time, single_age_time, ev_settings, model)
# No random effects if there is only one child.
if children and len(children) > 1:
construct_model_random_effects(default_age_time, single_age_time, ev_settings, model)
construct_model_covariates(default_age_time, single_age_time, covariate_multipliers, model)
asdr = data.age_specific_death_rate
if ev_settings.model.constrain_omega:
constrain_omega(
default_age_time, asdr, ev_settings, model, parent_location_id, children
)
return model | eb4287dcaceb1cf320bf8761143c96ffadc148b2 | 5,603 |
def set_off():
"""
Turns OFF the lamp.
"""
unicorn.set_status(False)
return OK | e82ab948a1656c343237a11e048c1ed38487353b | 5,604 |
def get_all_active_bets():
"""
Gets all the active bets for all
active discord ritoman users
"""
return session.query(LoLBets).filter(LoLBets.completed == false()).all() | 844b36b695bf67db3cff82711b9e17da3db20c8e | 5,605 |
import tensorflow as tf
def get_quantize_pos_min_diffs(inputs, f_min, f_max, q_min, q_max, bit_width):
    """Get the quantize pos that minimizes the difference between float and quantized values."""
with tf.name_scope("GetQuantizePosMinDiffs"):
min_scale_inv = tf.math.divide(f_min, q_min)
max_scale_inv = tf.math.divide(f_max, q_max)
float_scale_inv = tf.math.maximum(min_scale_inv, max_scale_inv)
non_overflow_pos = get_quantize_pos_non_overflow(inputs, f_min, f_max,
q_min, q_max)
def calc_pos():
diffs = []
for i in range(5):
with tf.name_scope("FakeQuantizeWithScale_{}".format(i)):
# fake quantize
scale = tf.math.pow(2.0, non_overflow_pos + i, name="scale")
quantized = dpu_symmetry_quantize(inputs, scale, q_min, q_max)
dequantized = dpu_symmetry_dequantize(quantized, scale, q_min, q_max)
diff = tf.pow(inputs - dequantized, 2)
diff = tf.reduce_sum(diff)
diffs.append(diff)
pos_offset = tf.argmin(diffs)
quantize_pos = non_overflow_pos + tf.cast(pos_offset, tf.float32)
return quantize_pos
return tf.cond(float_scale_inv < 1e-9, lambda: 127.0, calc_pos) | 63cc0b8ac370513ecfbe3068d02299a3d3016638 | 5,606 |
def _non_string_elements(x):
"""
    Simple helper to check that all values of x are strings. Returns all non-string elements as (position, element).
:param x: Iterable
:return: [(int, !String), ...]
"""
problems = []
for i in range(0, len(x)):
if not isinstance(x[i], str):
problems.append((i, x[i]))
return problems | 974715622949157693084823a52a88973b51d100 | 5,607 |
from pathlib import Path
def configure_dirs(base_path: str, config_name: str, dataset_name: str) -> str:
"""
Performs configuration of directories for storing vectors
:param base_path:
:param config_name:
:param dataset_name:
:return: Full configuration path
"""
base_path = Path(base_path)
base_path.mkdir(exist_ok=True)
full_path = base_path / config_name
full_path.mkdir(exist_ok=True)
full_path = full_path / dataset_name
full_path.mkdir(exist_ok=True)
return str(full_path) | b4791f836d0414ed582c3093b3dac1727f82a39c | 5,608 |
def config_entry_version_fixture():
"""Define a config entry version fixture."""
return 2 | cac78c1f02668c95ce918d6219cadd5f08ab21c9 | 5,609 |
def edge_dfs(G, source=None, orientation=None):
"""A directed, depth-first-search of edges in `G`, beginning at `source`.
Yield the edges of G in a depth-first-search order continuing until
all edges are generated.
Parameters
----------
G : graph
A directed/undirected graph/multigraph.
source : node, list of nodes
The node from which the traversal begins. If None, then a source
is chosen arbitrarily and repeatedly until all edges from each node in
the graph are searched.
orientation : None | 'original' | 'reverse' | 'ignore' (default: None)
For directed graphs and directed multigraphs, edge traversals need not
respect the original orientation of the edges.
When set to 'reverse' every edge is traversed in the reverse direction.
When set to 'ignore', every edge is treated as undirected.
When set to 'original', every edge is treated as directed.
In all three cases, the yielded edge tuples add a last entry to
indicate the direction in which that edge was traversed.
If orientation is None, the yielded edge has no direction indicated.
The direction is respected, but not reported.
Yields
------
edge : directed edge
A directed edge indicating the path taken by the depth-first traversal.
For graphs, `edge` is of the form `(u, v)` where `u` and `v`
are the tail and head of the edge as determined by the traversal.
For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is
the key of the edge. When the graph is directed, then `u` and `v`
are always in the order of the actual directed edge.
If orientation is not None then the edge tuple is extended to include
the direction of traversal ('forward' or 'reverse') on that edge.
Examples
--------
>>> nodes = [0, 1, 2, 3]
>>> edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)]
>>> list(nx.edge_dfs(nx.Graph(edges), nodes))
[(0, 1), (1, 2), (1, 3)]
>>> list(nx.edge_dfs(nx.DiGraph(edges), nodes))
[(0, 1), (1, 0), (2, 1), (3, 1)]
>>> list(nx.edge_dfs(nx.MultiGraph(edges), nodes))
[(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)]
>>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes))
[(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)]
>>> list(nx.edge_dfs(nx.DiGraph(edges), nodes, orientation="ignore"))
[(0, 1, 'forward'), (1, 0, 'forward'), (2, 1, 'reverse'), (3, 1, 'reverse')]
>>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes, orientation="ignore"))
[(0, 1, 0, 'forward'), (1, 0, 0, 'forward'), (1, 0, 1, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')]
Notes
-----
The goal of this function is to visit edges. It differs from the more
familiar depth-first traversal of nodes, as provided by
:func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`, in
that it does not stop once every node has been visited. In a directed graph
with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited
if not for the functionality provided by this function.
See Also
--------
:func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`
"""
nodes = list(G.nbunch_iter(source))
if not nodes:
return
directed = G.is_directed()
kwds = {"data": False}
if G.is_multigraph() is True:
kwds["keys"] = True
# set up edge lookup
if orientation is None:
def edges_from(node):
return iter(G.edges(node, **kwds))
elif not directed or orientation == "original":
def edges_from(node):
for e in G.edges(node, **kwds):
yield e + (FORWARD,)
elif orientation == "reverse":
def edges_from(node):
for e in G.in_edges(node, **kwds):
yield e + (REVERSE,)
elif orientation == "ignore":
def edges_from(node):
for e in G.edges(node, **kwds):
yield e + (FORWARD,)
for e in G.in_edges(node, **kwds):
yield e + (REVERSE,)
else:
raise nx.NetworkXError("invalid orientation argument.")
# set up formation of edge_id to easily look up if edge already returned
if directed:
def edge_id(edge):
# remove direction indicator
return edge[:-1] if orientation is not None else edge
else:
def edge_id(edge):
# single id for undirected requires frozenset on nodes
return (frozenset(edge[:2]),) + edge[2:]
# Basic setup
check_reverse = directed and orientation in ("reverse", "ignore")
visited_edges = set()
visited_nodes = set()
edges = {}
# start DFS
for start_node in nodes:
stack = [start_node]
while stack:
current_node = stack[-1]
if current_node not in visited_nodes:
edges[current_node] = edges_from(current_node)
visited_nodes.add(current_node)
try:
edge = next(edges[current_node])
except StopIteration:
# No more edges from the current node.
stack.pop()
else:
edgeid = edge_id(edge)
if edgeid not in visited_edges:
visited_edges.add(edgeid)
# Mark the traversed "to" node as to-be-explored.
if check_reverse and edge[-1] == REVERSE:
stack.append(edge[0])
else:
stack.append(edge[1])
yield edge | 8e7f1ba137f0392768e5b814a8898842bf2e5c2f | 5,610 |
import hashlib
from io import DEFAULT_BUFFER_SIZE
async def _md5_by_reading(filepath: str, chunk_size: int = DEFAULT_BUFFER_SIZE) -> str:
"""
Compute md5 of a filepath.
"""
file_hash = hashlib.md5()
async with async_open(filepath, "rb") as reader:
async for chunk in reader.iter_chunked(chunk_size):
file_hash.update(chunk)
return file_hash.hexdigest() | d42e3f6ba994bc35c32cab48cdc4b78e44f678d1 | 5,611 |
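A usage sketch: the coroutine has to be driven by an event loop; async_open is assumed to come from the aiofile package.
import asyncio
# hypothetical file name, for illustration only
print(asyncio.run(_md5_by_reading("some_file.bin")))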
def client_authenticator_factory(mechanism, password_manager):
"""Create a client authenticator object for given SASL mechanism and
password manager.
:Parameters:
- `mechanism`: name of the SASL mechanism ("PLAIN", "DIGEST-MD5" or "GSSAPI").
- `password_manager`: name of the password manager object providing
authentication credentials.
:Types:
- `mechanism`: `str`
- `password_manager`: `PasswordManager`
:return: new authenticator.
:returntype: `sasl.core.ClientAuthenticator`"""
authenticator=all_mechanisms_dict[mechanism][0]
return authenticator(password_manager) | 93fccb21f71a31fed953f6260f5906b240669033 | 5,612 |
def wait_for_cell_data_connection(
log,
ad,
state,
timeout_value=EventDispatcher.DEFAULT_TIMEOUT):
"""Wait for data connection status to be expected value for default
data subscription.
Wait for the data connection status to be DATA_STATE_CONNECTED
or DATA_STATE_DISCONNECTED.
Args:
log: Log object.
ad: Android Device Object.
state: Expected status: True or False.
If True, it will wait for status to be DATA_STATE_CONNECTED.
            If False, it will wait for status to be DATA_STATE_DISCONNECTED.
timeout_value: wait for cell data timeout value.
This is optional, default value is EventDispatcher.DEFAULT_TIMEOUT
Returns:
True if success.
False if failed.
"""
sub_id = get_default_data_sub_id(ad)
return wait_for_cell_data_connection_for_subscription(log, ad, sub_id,
state, timeout_value) | f2e2474af757c5a36cb054afe1f429638641f2e6 | 5,613 |
import configparser
import typing as t
def _parse_lists(config_parser: configparser.ConfigParser, section: str = '') -> t.Dict:
"""Parses multiline blocks in *.cfg files as lists."""
config = dict(config_parser.items(section))
for key, val in config.items():
if '/' in val and 'parameters' not in section:
config[key] = parse_mars_syntax(val)
elif '\n' in val:
config[key] = _splitlines(val)
return config | d591a9eeb656dff9c4fbbce9964575cd7ce15352 | 5,614 |
def get_filename_pair(filename):
"""
Given the name of a VASF data file (*.rsd) or parameter file (*.rsp) return
a tuple of (parameters_filename, data_filename). It doesn't matter if the
filename is a fully qualified path or not.
- assumes extensions are all caps or all lower
"""
param_filename = data_filename = filename[:-1]
if filename[-1:].isupper():
data_filename += 'D'
param_filename += 'P'
else:
data_filename += 'd'
param_filename += 'p'
return (param_filename, data_filename) | f6eb5a64cf472f230c5806447d9c2ee8ae43a71d | 5,615 |
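A quick demo: the extension case of the input decides the case of both outputs.
print(get_filename_pair('scan01.rsd'))  # ('scan01.rsp', 'scan01.rsd')
print(get_filename_pair('SCAN01.RSD'))  # ('SCAN01.RSP', 'SCAN01.RSD')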
async def get_reposet(client, headers, reposet_id):
"""Get the reposet by id."""
url = f"https://api.athenian.co/v1/reposet/{reposet_id}"
return await do_request(client, url, headers) | b97262bf1f246bc563abe352f1122a9ad61705c3 | 5,616 |
import platform
def detect_os_flavour(os_type):
    """Detect Linux flavours and return the current distribution name"""
    if os_type:
        # linux
        try:
            return platform.linux_distribution()[0]
        except Exception:
            return None
else:
# windows
return platform.platform() | 4ab3ebec3683fc99a99e70540ea29d049b54347d | 5,618 |
def straightenImage(im, imextent, mvx=1, mvy=None, verbose=0, interpolation=cv2_interpolation):
""" Scale image to make square pixels
Arguments
---------
im: array
input image
    imextent : list of 4 floats
coordinates of image region (x0, x1, y0, y1)
mvx, mvy : float
number of mV per pixel requested
Returns
-------
ims: numpy array
transformed image
(fw, fh, mvx, mvy, H) : data
H is the homogeneous transform from original to straightened image
"""
if cv2 is None:
raise Exception('opencv is not installed, method straightenImage is not available')
dxmv = imextent[1] - imextent[0]
dymv = imextent[3] - imextent[2]
dx = im.shape[1]
dy = im.shape[0]
mvx0 = dxmv / float(dx - 1) # mv/pixel
mvy0 = dymv / float(dy - 1)
if mvy is None:
mvy = mvx
fw = np.abs((float(mvx0) / mvx))
fh = np.abs((float(mvy0) / mvy))
if fw < .5:
fwx = fw
fac = 1
ims = im
while (fwx < .5):
ims = cv2.resize(
ims, None, fx=.5, fy=1, interpolation=cv2.INTER_LINEAR)
fwx *= 2
fac *= 2
ims = cv2.resize(
ims, None, fx=fac * fw, fy=fh, interpolation=interpolation)
else:
ims = cv2.resize(im, None, fx=fw, fy=fh, interpolation=interpolation)
if verbose:
print('straightenImage: size %s fx %.4f fy %.4f' % (im.shape, fw, fh))
print('straightenImage: result size %s mvx %.4f mvy %.4f' % (ims.shape, mvx, mvy))
H = pgeometry.pg_transl2H([-.5, -.5]) .dot(np.diag([fw, fh, 1]).dot(pgeometry.pg_transl2H([.5, .5])))
return ims, (fw, fh, mvx, mvy, H) | ab46e394011a8a2d9ed8974504e4e28b725ead78 | 5,619 |
import math
def H(r2, H_s, H_d, a_s, a_d, gamma_s, gamma_d, G, v):
"""
"""
pi = math.pi
sqrt = math.sqrt
r = sqrt(r2)
H2_s = H_s**2
H2_d = H_d**2
R2_s = r2 + H2_s
R2_d = r2 + H2_d
alpha_s = 1.0 if gamma_s == 1.0 else 4 * H2_s / (pi*R2_s)
alpha_d = 1.0 if gamma_d == 1.0 else 4 * H2_d / (pi*R2_d)
f_s = a_s**3 * alpha_s * (1-v) / (G * (H2_s+r2)**1.5)
f_d = a_d**3 * alpha_d * (1-v) / (G * (H2_d+r2)**1.5)
H = [
[ r*f_s, r*f_d ], # the radial H
[ H_s*f_s, H_d*f_d ] # the vertical H
]
return H | 0fa1606212278def22075692a56468d41a8c7a3c | 5,620 |
from datetime import datetime
def parse_time(event_time):
"""Take a string representation of time from the blockchain, and parse it into datetime object."""
return datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S') | df5af3a20acbeaa8e7424291d26055d3f38219ed | 5,621 |
def addBenchmark(df):
"""Add benchmark to df."""
# Compute the inverse of the distance
distance_inv = (1. / df.filter(regex='^distance*', axis=1)).values
# Extract the value at the nearest station
values = df.filter(regex='value_*', axis=1)
    # Compute the weighted numerator and denominator
numer = (distance_inv * values).sum(axis=1)
denom = (distance_inv * (values != 0)).sum(axis=1)
# Compute the benchmark
benchmark = numer / denom
df["Benchmark"] = benchmark
return df | 62c63215d622c46bed8200f97ad55b985e2beb20 | 5,623 |
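A worked example of the inverse-distance weighting (the distance_*/value_* column names are illustrative): with distances (1, 2) and values (10, 20), benchmark = (10/1 + 20/2) / (1/1 + 1/2) = 20 / 1.5 ≈ 13.33.
import pandas as pd
df = pd.DataFrame({'distance_1': [1.0], 'distance_2': [2.0],
                   'value_1': [10.0], 'value_2': [20.0]})
print(addBenchmark(df)['Benchmark'])  # ~13.333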
def is_file_like(f):
"""Check to see if ```f``` has a ```read()``` method."""
return hasattr(f, 'read') and callable(f.read) | 9eee8c8f4a6966d1db67fb4aa9149e2fbd390fb9 | 5,624 |
import numpy as np
def _string_to_days_since_date(dateStrings, referenceDate='0001-01-01'):
"""
Turn an array-like of date strings into the number of days since the
reference date
"""
dates = [_string_to_datetime(string) for string in dateStrings]
days = _datetime_to_days(dates, referenceDate=referenceDate)
days = np.array(days)
return days | 114883cf0f2e48812a580c4cd8ae64671a4fc126 | 5,625 |
import numpy as np
def safe_as_int(val, atol=1e-3):
"""
Attempt to safely cast values to integer format.
Parameters
----------
val : scalar or iterable of scalars
Number or container of numbers which are intended to be interpreted as
integers, e.g., for indexing purposes, but which may not carry integer
type.
atol : float
Absolute tolerance away from nearest integer to consider values in
``val`` functionally integers.
Returns
-------
val_int : NumPy scalar or ndarray of dtype `cupy.int64`
Returns the input value(s) coerced to dtype `cupy.int64` assuming all
were within ``atol`` of the nearest integer.
Notes
-----
This operation calculates ``val`` modulo 1, which returns the mantissa of
all values. Then all mantissas greater than 0.5 are subtracted from one.
Finally, the absolute tolerance from zero is calculated. If it is less
than ``atol`` for all value(s) in ``val``, they are rounded and returned
in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is
returned.
If any value(s) are outside the specified tolerance, an informative error
is raised.
Examples
--------
>>> safe_as_int(7.0)
7
>>> safe_as_int([9, 4, 2.9999999999])
array([9, 4, 3])
>>> safe_as_int(53.1)
Traceback (most recent call last):
...
ValueError: Integer argument required but received 53.1, check inputs.
>>> safe_as_int(53.01, atol=0.01)
53
"""
mod = np.asarray(val) % 1 # Extract mantissa
# Check for and subtract any mod values > 0.5 from 1
if mod.ndim == 0: # Scalar input, cannot be indexed
if mod > 0.5:
mod = 1 - mod
else: # Iterable input, now ndarray
mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int
try:
np.testing.assert_allclose(mod, 0, atol=atol)
except AssertionError:
raise ValueError(
"Integer argument required but received "
"{0}, check inputs.".format(val)
)
return np.around(val).astype(np.int64) | cbaff1fb1568fd43a4dd3a7a2054a805788b912c | 5,626 |
def check_protocol(protocol):
"""
Check if a given protocol works by computing the qubit excitation probabilities
"""
qubit_weight = {}
qubit_weight[protocol[0][0][0]] = 1.0
for pair_set in protocol:
for i, j, p in pair_set:
qubit_weight[j] = qubit_weight[i] * (1.0 - p)
qubit_weight[i] *= p
return qubit_weight | 8b9d0a8e329a340718d37bc79066be4a05cf2d20 | 5,627 |
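A worked example: starting with all weight on qubit 0, the pair (0, 1, 0.5) splits it evenly, then (0, 2, 0.5) splits qubit 0's remaining half again.
print(check_protocol([[(0, 1, 0.5)], [(0, 2, 0.5)]]))
# {0: 0.25, 1: 0.5, 2: 0.25}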
def detachVolume(**kargs):
""" detachVolume your Additional Volume
* Args:
- zone(String, Required) : [KR-CA, KR-CB, KR-M, KR-M2]
- id(String, Required) : Volume disk ID
* Examples : print(server.detachVolume(zone='KR-M', id='7f933f86-e8bf-4600-9423-09e8f1c84460'))
"""
my_apikey, my_secretkey = c.read_config()
if not 'zone' in kargs:
return c.printZoneHelp()
if not 'id' in kargs:
return '[ktcloud] Missing required argument \"id\" (disk volume id)'
kargs['zoneid'] = c.getzoneidbyhname(kargs['zone'])
M2Bool = c.IsM2(kargs['zone'])
del kargs['zone']
baseurl = c.geturl(ctype='server', m2=M2Bool)
kargs['command'] = 'detachVolume'
kargs['response'] = 'json'
kargs['apikey'] = my_apikey
return c.makerequest(kargs, baseurl, my_secretkey) | 9c837559052fb41f4e40d18c211a497c7de3ca63 | 5,628 |
from typing import List
from typing import Type
from typing import Optional
from typing import Dict
from typing import Any
from typing import Callable
import time
import traceback
def observe(metric: str,
accept_on: List[Type[Exception]] = [], # pylint: disable=E1136
decline_on: List[Type[Exception]] = [], # pylint: disable=E1136
static_tags: List[str] = [], # pylint: disable=E1136
tags_from: Optional[Dict[str, List[str]]] = None, # pylint: disable=E1136
trace_id_from: Optional[Dict[str, str]] = None) -> Any: # pylint: disable=E1136
"""This operator will, based on the provided setup generate logs, metrics, notifications on each call for that execution.
Args:
metric (str): The root-metric which will be updated during execution in e.g. DogStatsd.
accept_on (Optional[List[Exception]], optional): A list of exceptions on which the message will be acknowledged.
decline_on (Optional[List[Exception]], optional): A list of exceptions on which the message will be declined.
static_tags (Optional[List[str]], optional): A list of tags to be appended on each metric update.
tags_from (Optional[Dict[str, List[str]]], optional): A list of tags to be dynamically extracted from the key dictionary.
trace_id_from (Optional[Dict[str, str]], optional): A trace_id to be appended on each log from the key dictionary.
"""
def arrange(func: Callable[..., Any]):
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> Any:
# setup tracing and tags
trace_id = Resolver.resolve_trace_id(trace_id_from=trace_id_from, **kwargs)
identity = Resolver.resolve_identity(*args, func=func, trace_id=trace_id)
additional_tags = Resolver.resolve_tags_from(tags_from=tags_from, **kwargs)
all_tags = additional_tags + static_tags
imetric = Provider.get_metric(*args)
# start timing
time_start: float = time.monotonic()
try:
# actual function execution
response: Any = func(*args, **kwargs)
# calculate process time
                process_time = int((time.monotonic() - time_start) * 1000)
# append extra tags
all_tags.append(Resolver.resolve_observed_sli_tag(process_time=process_time))
# send metrics, finished successfully
imetric.timing("%s.time.finished" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.finished" % metric, process_time, all_tags)
imetric.increment("%s.finished" % metric, 1, all_tags)
except Exception as ex:
# calculate process time
                process_time = int((time.monotonic() - time_start) * 1000)
# append extra tags
all_tags.append('exception:%s' % type(ex).__name__)
all_tags.append(Resolver.resolve_observed_sli_tag(process_time=process_time))
# accept on, returns True
if type(ex) in accept_on:
# log warning
Provider.get_logger(*args).warning("%s: %s(%s) during '%s' accepted.\n%s" % (
identity, type(ex).__name__, ex, func.__name__, traceback.format_exc()))
# send metrics, raised but accepted
imetric.timing("%s.time.accepted" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.accepted" % metric, process_time, all_tags)
imetric.increment('%s.exception.accepted' % metric, 1, all_tags)
# return truthy, to be acknowledged
return True
# decline on, returns False
if type(ex) in decline_on:
# log error
Provider.get_logger(*args).error("%s: %s(%s) during '%s' declined.\n%s" % (
identity, type(ex).__name__, ex, func.__name__, traceback.format_exc()))
# send metrics, raised but declined
imetric.timing("%s.time.declined" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.declined" % metric, process_time, all_tags)
imetric.increment('%s.exception.declined' % metric, 1, all_tags)
# return falsy, not to be acknowledged
return False
# unhandled exception, log error
Provider.get_logger(*args).error("%s: %s(%s) during '%s' raised.\n%s" % (
identity, type(ex).__name__, ex, func.__name__, traceback.format_exc()))
# send metrics, raised and unhandled
imetric.timing("%s.time.raised" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.raised" % metric, process_time, all_tags)
imetric.increment('%s.exception.raised' % metric, 1, all_tags)
# check if notification client available
slack = Provider.get_slack(*args)
if slack:
# notify
slack.error(header=identity, title=type(ex).__name__, text=f"{ex}\n{traceback.format_exc()}")
# re-raise
raise ex
finally:
# send metric, start
imetric.increment("%s.start" % metric, 1, all_tags)
# return actual response of the function
return response
return inner
return arrange | 501fbaf8b4b3f77e334d7579834162f0393d1b5d | 5,629 |
import numpy
def spherical_to_cartesian(lons, lats, depths):
"""
Return the position vectors (in Cartesian coordinates) of list of spherical
coordinates.
For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html.
Parameters are components of spherical coordinates in a form of scalars,
lists or numpy arrays. ``depths`` can be ``None`` in which case it's
considered zero for all points.
:returns:
``numpy.array`` of 3d vectors representing points' coordinates in
Cartesian space. The array has the same shape as parameter arrays.
In particular it means that if ``lons`` and ``lats`` are scalars,
the result is a single 3d vector. Vector of length ``1`` represents
distance of 1 km.
See also :func:`cartesian_to_spherical`.
"""
phi = numpy.radians(lons)
theta = numpy.radians(lats)
if depths is None:
rr = EARTH_RADIUS
else:
rr = EARTH_RADIUS - numpy.array(depths)
cos_theta_r = rr * numpy.cos(theta)
xx = cos_theta_r * numpy.cos(phi)
yy = cos_theta_r * numpy.sin(phi)
zz = rr * numpy.sin(theta)
vectors = numpy.array([xx.transpose(), yy.transpose(), zz.transpose()]) \
.transpose()
return vectors | 107899c23eeb7fb2cf79fbaa6650b4584543260a | 5,631 |
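A sanity check, assuming EARTH_RADIUS is the module constant (~6371 km): lon = lat = 0 lands on the x-axis at Earth radius.
import numpy as np
v = spherical_to_cartesian(0.0, 0.0, None)
assert np.allclose(v, [EARTH_RADIUS, 0.0, 0.0])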
import time
def get_remote_webdriver(hub_url, browser, browser_ver, test_name):
"""
This functions returns remote web-driver instance created in selenoid
machine.
:param hub_url
:param browser: browser name
:param browser_ver: version for browser
:param test_name: test name
:return: remote web-driver instance for specified browser
"""
test_name = browser + browser_ver + "_" + test_name + "-" + time.strftime(
"%m_%d_%y_%H_%M_%S", time.localtime())
driver_local = None
desired_capabilities = {
"version": browser_ver,
"enableVNC": True,
"enableVideo": True,
"enableLog": True,
"videoName": test_name + ".mp4",
"logName": test_name + ".log",
"name": test_name,
"timeZone": "Asia/Kolkata",
"sessionTimeout": "180s"
}
if browser == 'firefox':
profile = webdriver.FirefoxProfile()
profile.set_preference("dom.disable_beforeunload", True)
desired_capabilities["browserName"] = "firefox"
desired_capabilities["requireWindowFocus"] = True
desired_capabilities["enablePersistentHover"] = False
driver_local = webdriver.Remote(
command_executor=hub_url,
desired_capabilities=desired_capabilities, browser_profile=profile)
elif browser == 'chrome':
options = Options()
options.add_argument("--window-size=1280,1024")
desired_capabilities["browserName"] = "chrome"
driver_local = webdriver.Remote(
command_executor=hub_url,
desired_capabilities=desired_capabilities, options=options)
else:
print("Specified browser does not exist.")
# maximize browser window
driver_local.maximize_window()
# driver_local.implicitly_wait(2)
return driver_local | 2f467f38f2fda6e7f95343e842ea42c3bb551181 | 5,632 |
def process_images(rel_root_path, item_type, item_ids, skip_test, split_attr,
gen_image_specs_func, trafo_image_func,
trafo_image_extra_kwargs=None, img_obj_type=None,
img_attr=None, dimensions=(256, 256),
max_valset_size=10000):
"""
This function downloads all photos which are part of the
dataset. This is a general function which can be used for lots of different
layers.
It returns a dictionary which contains the downloaded image paths.
Key: dataset split identifier, can be 'E', 'V', 'R'
Value: tuple of (item indexes in the item_ids array, corresponding image paths)
:param rel_root_path: The root path of the photos and generated training
files relative to the Caffe root path.
:param item_type: The type of the model class for the items which are
classified (e.g. FgPhoto). This class should have 'photo',
'matclass_dataset_split' attributes/properties. The photo attribute should
have most of the Photo model's fields. It is advised to use an actual Photo
instance here. The matclass_dataset_split attribute should indicate in
which dataset split this item is in. The possible dataset splits are 'E'
(test), 'V' (validation), 'R' (training).
:param item_ids: List (or numpy array) of ids into the :ref:`item_type`
table. It should contain the training, validation and test set.
:param skip_test: If true, skip generating file and downloading images for
the test split.
:param split_attr: The attribute name which represents the dataset split in
the database. It should be one character, 'E' meaning test, 'V' meaning
validation, 'R' meaning training.
:param gen_image_specs_func: Function which generates an id, photo id, image
path triplet for each item which we later use to download the images.
:param trafo_image_func: If None, we don't apply any transformation on the
images. Function which transforms an image given the image path and the
extra parameters, it should return the path of the transformed image, which
can be the original image path or a new path.
:ref:`trafo_image_extra_kwargs` will be passed as extra parameters to this function.
:param trafo_image_extra_kwargs: Extra keyword arguments which will be passed to
:ref:`trafo_image_func` function. All of them should be a list which has the
same order as :ref:`item_ids`.
:param img_obj_type: The type of the model class which holds an image.
:param img_attr: The attribute of `img_obj_type` which holds the image.
:param dimensions: The dimensions to resize the downloaded images to. If
None, keep the image as original size.
:param max_valset_size: The maximum size for the validation set.
"""
item_id_to_idx = {id: idx for idx, id in enumerate(item_ids)}
abbr, fnames = get_abbr_fname(skip_test)
# The return value
image_data = {}
for mc_ds_s, fname in zip(abbr, fnames):
data_path = os.path.join(rel_root_path, 'data')
ensuredir(os.path.join(settings.CAFFE_ROOT, data_path))
        print('Generating split file and downloading images for {} split...'.format(fname))
        print('Generating a list of images to download...')
image_specs = []
for item_ids_batch in progress_bar(iter_batch(item_ids, 10000)):
# Note that the order is not going to be the same as
# item_ids_batch, so we expect the data layer to shuffle the data!
items_split = (
item_type.objects.
filter(**{split_attr: mc_ds_s}).
filter(id__in=item_ids_batch).
order_by()
)
# A list of item_id, image_url, image_path tuples
image_specs += gen_image_specs_func(data_path, items_split)
if not image_specs:
image_data[mc_ds_s] = ([], [])
continue
# We want the validation step to finish in tractable time, so we have a
# maximum threshold on the validation set size
if mc_ds_s == 'V' and len(image_specs) > max_valset_size:
            print('Sampling {} images to reduce the size of the validation set...'.format(max_valset_size))
# For reproducibility
random.seed(125)
image_specs = random.sample(image_specs, max_valset_size)
item_ids_perm, img_obj_ids, image_paths_list = zip(*image_specs)
# A corresponding list of indices into the item_ids array
item_idxs = [item_id_to_idx[item_id] for item_id in item_ids_perm]
# Add caffe root to all paths for downloading
full_image_paths_list = [
[
os.path.join(settings.CAFFE_ROOT, ip)
for ip in ipl
]
for ipl in image_paths_list
]
# Downloading images
download_images(
item_type=img_obj_type,
item_ids=list(itertools.chain.from_iterable(img_obj_ids)),
img_attr=img_attr,
image_paths=list(itertools.chain.from_iterable(full_image_paths_list)),
format='JPEG',
dimensions=dimensions,
)
if trafo_image_func:
            print('Transforming images...')
new_image_paths_list = []
new_item_idxs = []
for item_idx, image_paths, full_image_paths in progress_bar(zip(item_idxs, image_paths_list, full_image_paths_list)):
new_image_paths = trafo_image_func(
image_paths,
full_image_paths,
**index_kwargs(trafo_image_extra_kwargs, item_idx)
)
if not new_image_paths:
                    print(':( {}'.format(full_image_paths))
continue
new_image_paths_list.append(new_image_paths)
new_item_idxs.append(item_idx)
image_paths_list = new_image_paths_list
item_idxs = new_item_idxs
image_data[mc_ds_s] = (item_idxs, image_paths_list)
return image_data | 9d9d6d53ed6a93c2c8b3f7c70d6cda54277b42b1 | 5,633 |
from typing import Callable
def get_transform_dict(args, strong_aug: Callable):
"""
Generates dictionary with transforms for all datasets
Parameters
----------
args: argparse.Namespace
Namespace object that contains all command line arguments with their corresponding values
strong_aug: Callable
Callable object implementing the applied strong augmentation strategy, i.e. RandAugment or CTAugment
(not implemented yet).
Returns
-------
transform_dict: Dict
Dictionary containing transforms for the labeled train set, unlabeled train set
and the validation / test set
"""
img_size = IMG_SIZE[args.dataset]
padding = int(0.125 * img_size)
return {
"train": FixMatchTransform.labeled(args.dataset, img_size, padding),
"train_unlabeled": FixMatchTransform.unlabeled(args.dataset, strong_aug, img_size, padding),
"test": get_normalizer(args.dataset),
} | 17ecc3ee611a9fa73176f9a9f354e293d7e4cc39 | 5,635 |
def choose_first_not_none(*args):
""" Choose first non None alternative in args.
:param args: alternative list
:return: the first non None alternative.
"""
for a in args:
if a is not None:
return a
return None | fe3efba85251161cd0a6ecb50583cc443cd04dc0 | 5,636 |
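A quick demo: falsy-but-not-None values such as 0 or '' are still returned.
print(choose_first_not_none(None, 0, 5))  # 0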
def _format_compact(value, short=True):
"""Compact number formatting using proper suffixes based on magnitude.
Compact number formatting has slightly idiosyncratic behavior mainly due to
two rules. First, if the value is below 1000, the formatting should just be a
2 digit decimal formatting. Second, the number is always truncated to leave at
least 2 digits. This means that a number with one digit more than the
magnitude, such as 1250, is still left with 1.2K, whereas one more digit would
leave it without the decimal, such as 12500 becoming 12K.
Args:
value: The value to format.
short: Whether to use the short form suffixes or long form suffixes.
Returns:
A formatted number as a string.
"""
if value < 1000:
return '{0:.2f}'.format(value).rstrip('0').rstrip('.')
suffixes = _SHORT_SUFFIXES if short else _LONG_SUFFIXES
for key, suffix in sorted(suffixes.items(), reverse=True):
if value >= key:
value = value / float(key)
if value >= 10:
pattern = '{0:,.0f}' + suffix
else:
pattern = '{0:.1f}' + suffix
return pattern.format(value) | 3f2a2b034cbe1e8a3f21ded743ec328a692cc039 | 5,637 |
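A demo of the two truncation rules, assuming _SHORT_SUFFIXES maps 1000 to 'K', 1000000 to 'M', and so on:
print(_format_compact(999.5))  # '999.5' (plain two-decimal formatting below 1000)
print(_format_compact(1250))   # '1.2K'  (one decimal kept while below 10)
print(_format_compact(12500))  # '12K'   (decimal dropped once two digits remain)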
def matrix(mat,nrow=1,ncol=1,byrow=False):
"""Given a two dimensional array, write the array in a matrix form"""
nr=len(mat)
rscript='m<-matrix(data=c('
try:
nc=len(mat[0])
for m in mat:
rscript+=str(m)[1:-1]+ ', '
rscript=rscript[:-2]+'), nrow=%d, ncol=%d, byrow=TRUE,' %(nr,nc)
except TypeError:
rscript+=str(mat)[1:-1]+','
rscript=rscript[:-1]+'), nrow=%d, ncol=%d,' %(nrow,ncol)
if byrow: rscript+='byrow=TRUE,'
rscript=rscript[:-1]+')\n'
return rscript | a28d91d797238857dd2ff58f24655504a936d4a7 | 5,638 |
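The generated R source for a 2x2 input looks like this:
print(matrix([[1, 2], [3, 4]]))
# m<-matrix(data=c(1, 2, 3, 4), nrow=2, ncol=2, byrow=TRUE)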
from theano import tensor as T
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
"""
return T.all(x, axis=axis, keepdims=keepdims) | b01891385c2b41d42b976beaf0ee8922b632d705 | 5,639 |
def config_find_permitted(cookie, dn, in_class_id, in_filter, in_hierarchical=YesOrNo.FALSE):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ConfigFindPermitted")
method.cookie = cookie
method.dn = dn
method.in_class_id = in_class_id
method.in_filter = in_filter
method.in_hierarchical = (("false", "true")[in_hierarchical in ucsgenutils.AFFIRMATIVE_LIST])
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 0367fb6d208b4a03ea4c1cc79e14faabe038cba6 | 5,640 |
def reformat_medication_statuses(data: FilteredData) -> FilteredData:
"""
Reformats medication statuses to binary indicators.
Args:
data: The data containing medication statuses to reformat.
Returns:
Data with reformatted medication statuses.
"""
for j in data.medications.columns[data.medications.columns.str.contains(
'_status$')]:
data.medications[j] = (~(data.medications[j].isin(
['NONE', 'NEW']))).astype(int)
return data | 4428d7e65893dfea33490de28e77dffe66562c31 | 5,641 |
import json
def list_daemons(dut):
"""Get daemon table from ovsdb-server."""
daemon_list = {}
c = ovs_vsctl + "--format json list daemon"
out = dut(c, shell="bash")
json_out = json.loads(out)['data']
# The output is in the following format
# [["uuid","19b943b0-096c-4d7c-bc0c-5b6ac2f83014"],0,true,"ops-pmd"]
for item in json_out:
daemon_list[item[3]] = {'is_hw_handler': item[2]}
return daemon_list | 35b28e8c38cd48a93f642b0e2820d0ff2ff87450 | 5,643 |
import math
import collections
def Cleanse(obj, encoding="utf-8"):
"""Makes Python object appropriate for JSON serialization.
- Replaces instances of Infinity/-Infinity/NaN with strings.
- Turns byte strings into unicode strings.
- Turns sets into sorted lists.
- Turns tuples into lists.
Args:
obj: Python data structure.
encoding: Charset used to decode byte strings.
Returns:
Unicode JSON data structure.
"""
if isinstance(obj, int):
return obj
elif isinstance(obj, float):
if obj == _INFINITY:
return "Infinity"
elif obj == _NEGATIVE_INFINITY:
return "-Infinity"
elif math.isnan(obj):
return "NaN"
else:
return obj
elif isinstance(obj, bytes):
return obj.decode(encoding)
elif isinstance(obj, (list, tuple)):
return [Cleanse(i, encoding) for i in obj]
elif isinstance(obj, set):
return [Cleanse(i, encoding) for i in sorted(obj)]
elif isinstance(obj, dict):
return collections.OrderedDict(
(Cleanse(k, encoding), Cleanse(v, encoding)) for k, v in obj.items()
)
else:
return obj | e50e44bd3aa685838ea2e68537e2df288e9a058f | 5,644 |
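A quick demo of the special cases: non-finite floats become strings, sets become sorted lists, and byte strings are decoded.
print(Cleanse(float('nan')))          # 'NaN'
print(Cleanse({3, 1, 2}))             # [1, 2, 3]
print(Cleanse((b'a', float('inf'))))  # ['a', 'Infinity']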
import time
def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
"""Stops waiting on success."""
start_time = time.time()
if exc_matcher is not None:
exc_class = boto.exception.BotoServerError
if exc_class is None:
exc_class = BaseException
while True:
result = None
try:
result = lfunction()
LOG.info('No Exception in %d second',
time.time() - start_time)
return result
except exc_class as exc:
if exc_matcher is not None:
res = exc_matcher.match(exc)
if res is not None:
LOG.info(res)
raise exc
# Let the other exceptions propagate
dtime = time.time() - start_time
if dtime > default_timeout:
raise TestCase.failureException("Wait timeout exceeded! (%ds)" %
dtime)
time.sleep(default_check_interval) | be5e9798570dd3f6ca6f9b78d136179f68a4ad3c | 5,645 |
def testapp(app, init_database):
"""Create Webtest app."""
testapp = TestApp(app)
#testapp = TestApp(app, extra_environ=dict(REMOTE_USE='test'))
# testapp.set_authorization(('Basic', (app.config['USERNAME'],app.config['PASSWORD'])))
# testapp.get_authorization()
return testapp | 01a579ae22c0eedfaac7d6dd5aeffd1b63f34612 | 5,646 |
def dois(self, key, value):
"""Translates dois fields."""
_identifiers = self.get("identifiers", [])
for v in force_list(value):
material = mapping(
MATERIALS,
clean_val("q", v, str, transform="lower"),
raise_exception=True,
)
doi = {
"value": clean_val("a", v, str, req=True),
"material": material,
"source": clean_val("9", v, str),
"scheme": "DOI",
}
if doi not in _identifiers:
_identifiers.append(doi)
return _identifiers | b7635815c451856d249feebeb1084ad28f0357d9 | 5,647 |
def transition(src,
dest,
state=None,
permissions=None,
required=None,
commit_record=True,
**kwargs):
"""Decorator that marks the wrapped function as a state transition.
    :params: parameters for the Transition object; see its documentation for details.
:returns: A wrapper around a wrapped function, with added `_fsm` field containing the `Transition` spec.
"""
if permissions is not None and not isinstance(permissions, (list, tuple)):
permissions = [permissions]
if required is not None and not isinstance(required, (list, tuple)):
required = [required]
if not isinstance(src, (list, tuple)):
src = [src]
t = Transition(
src=src,
dest=dest,
state=state,
permissions=permissions,
required=required,
commit_record=commit_record,
**kwargs
)
def inner(f):
@has_required_params(t)
def wrapper(self, *args, **kwargs):
record = self
t.check_valid_state(record)
t.check_permissions(record)
t.execute(record=record, **kwargs)
return f(self, *args, **kwargs)
wrapper._fsm = t
t.function = wrapper
t.original_function = f
return wrapper
return inner | 309ffca49c2dd2af4dabb084c5f642d40e9d34e8 | 5,648 |
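A hedged sketch of how the decorator is meant to be applied; the `Deposit` class and the
state names below are hypothetical, and the surrounding `Transition` machinery is assumed
from the source package:

class Deposit:  # hypothetical record class with a state machine
    @transition(src=['draft'], dest='submitted', permissions=['depositor'])
    def submit(self, **kwargs):
        return self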
from requests_html import HTMLSession
from lxml.etree import ParserError  # assumed origin of ParserError; not shown in the original snippet

def get_html(url):
"""Returns HTML object based on given Gumtree URL.
:param url: Offer URL.
:return: Offer HTML object.
"""
session = HTMLSession()
try:
r = session.get(url)
return r.html
except ParserError:
return None | 6fd3aa8e7f2ff81f912e0d7050872a6e2de14827 | 5,649 |
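Minimal usage sketch (the offer URL below is a placeholder, not from the original source):

html = get_html("https://www.gumtree.pl/some-offer")  # hypothetical URL
if html is not None:
    print(html.find("title", first=True).text)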
import numpy as np

def _get_lattice_parameters(lattice):
"""Return basis vector lengths
Parameters
----------
lattice : array_like
Basis vectors given as column vectors
shape=(3, 3), dtype='double'
Returns
-------
ndarray, shape=(3,), dtype='double'
"""
return np.array(np.sqrt(np.dot(lattice.T, lattice).diagonal()),
dtype='double') | 405111d5052307dd995e64c2ff7936481db4f34d | 5,650 |
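A small worked example: for an orthorhombic cell given as column vectors, the returned
lengths are simply the cell edge lengths:

lattice = np.array([[2.0, 0.0, 0.0],
                    [0.0, 3.0, 0.0],
                    [0.0, 0.0, 4.0]])
print(_get_lattice_parameters(lattice))  # -> [2. 3. 4.]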
import numpy as np
from copy import deepcopy

def save_batches(current_memory, id_tmp_dir, batch_num):
"""
batch_num : corresponds to the gradient update number
"""
target_csv = id_tmp_dir + "/batch" + str(batch_num) + ".csv"
obs_copy = deepcopy(current_memory['current_obs'])
reward_copy = deepcopy(current_memory['rewards'])
current_obs_batch = obs_copy.cpu().numpy()
obs_x = current_obs_batch[:,0]
obs_y = current_obs_batch[:,1]
reward_batch = reward_copy.cpu().numpy()
batch_list = np.column_stack((obs_x, obs_y, reward_batch))
fileheader = 'X-Position, Y-Position, Reward'
    np.savetxt(target_csv, batch_list, delimiter=',', header=fileheader)
return 0 | 311bf2c1083156dc26acb855f2cc61f142a1586b | 5,651 |
def ikrvea_mm(
reference_point: np.ndarray,
individuals: np.ndarray,
objectives: np.ndarray,
uncertainity: np.ndarray,
problem: MOProblem,
u: int) -> float:
""" Selects the solutions that need to be reevaluated with the original functions.
    This model management is based on the following paper:
'P. Aghaei Pour, T. Rodemann, J. Hakanen, and K. Miettinen, “Surrogate assisted interactive
multiobjective optimization in energy system design of buildings,”
Optimization and Engineering, 2021.'
Args:
reference_front (np.ndarray): The reference front that the current front is being compared to.
Should be an one-dimensional array.
individuals (np.ndarray): Current individuals generated by using surrogate models
objectives (np.ndarray): Current objectives generated by using surrogate models
uncertainity (np.ndarray): Current Uncertainty values generated by using surrogate models
problem : the problem class
    Returns:
        MOProblem: the problem object with an updated archive.
    """
nd = remove_duplicate(individuals, problem.archive.drop(
problem.objective_names, axis=1).to_numpy()) #removing duplicate solutions
if len(nd) == 0:
return problem
else:
non_duplicate_dv = individuals[nd]
non_duplicate_obj = objectives[nd]
non_duplicate_unc = uncertainity[nd]
# Selecting solutions with lowest ASF values
asf_solutions = SimpleASF([1]*problem.n_of_objectives).__call__(non_duplicate_obj, reference_point)
idx = np.argpartition(asf_solutions, 2*u)
        asf_unc = np.max(non_duplicate_unc[idx[0:2*u]], axis=1)
# index of solutions with lowest Uncertainty
lowest_unc_index = np.argpartition(asf_unc, u)[0:u]
        # evaluating the solutions with the lowest uncertainty; the archive gets updated in problem.evaluate()
problem.evaluate(non_duplicate_dv[lowest_unc_index], use_surrogate=False)[0]
problem.train(models=GaussianProcessRegressor,\
model_parameters={'kernel': Matern(nu=1.5)})
return problem | f9959cf7ddfcc2aa3aa2fc8c3062cfb63082b242 | 5,652 |
def homepage(request):
"""Main view of app.
We will display page with few step CTA links?
:param request: WSGIRequest instance
"""
if logged_as_admin(request):
offers = Offer.objects.get_for_administrator()
else:
offers = Offer.objects.get_weightened()
return render(
request,
'homepage.html',
{
'offers': offers,
'MEDIA_URL': settings.MEDIA_URL,
}
) | 003e6f86ab09ede87e7f1c86910808c2da9c1e9d | 5,653 |
def add_dict(dct1, dct2):
"""Returns a new dictionaries where the content of the dictionaries `dct1`
and `dct2` are merged together."""
result = dct1.copy()
result.update(dct2)
return result | eba785e4d00534e94c1bdde413603d64e18aac05 | 5,654 |
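Usage example:

merged = add_dict({"a": 1, "b": 2}, {"b": 3, "c": 4})
print(merged)  # -> {'a': 1, 'b': 3, 'c': 4}; neither input dict is modified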
import tempfile
from pathlib import Path
def edit_temp(contents="", name=""):
"""
Create a temporary file and open it in the system's default editor for the
user to edit. The saved contents of the file will be returned when the
editor is closed.
:param contents: Pre-fill the file with the given text.
:param name: Ensure that the temp filename has the given name.
:return: Contents of the file when the editor is closed.
"""
# Create a temp file, ensure it has requested name and contents
td = tempfile.TemporaryDirectory()
tfpath = Path(td.name) / (name or DEFAULT_TEMPFILE)
write_file(tfpath, contents)
# Edit interactively
return edit(tfpath) | 174acda4961cc945b917be6c66d1218d3e46914d | 5,655 |
def _new_primitive_control(
rabi_rotation=None,
azimuthal_angle=0.,
maximum_rabi_rate=2. * np.pi,
**kwargs):
"""
Primitive driven control.
Parameters
----------
rabi_rotation : float, optional
The total rabi rotation to be performed by the driven control.
maximum_rabi_rate : float, optional
Defaults to 2.*np.pi
The maximum rabi frequency for the driven control.
azimuthal_angle : float, optional
The azimuthal position of the driven control.
kwargs : dict
Other keywords required to make a qctrlopencontrols.DrivenControls.
Returns
-------
qctrlopencontrols.DrivenControl
The driven control.
"""
(maximum_rabi_rate, rabi_rotation, azimuthal_angle) = _predefined_common_attributes(
maximum_rabi_rate, rabi_rotation, azimuthal_angle)
return DrivenControl(
rabi_rates=[maximum_rabi_rate],
azimuthal_angles=[azimuthal_angle],
detunings=[0],
durations=[rabi_rotation/maximum_rabi_rate],
**kwargs) | dcaab9ace0269a0404435639d0e9e9e025f1013a | 5,656 |
from typing import Optional
from typing import Any
from typing import Tuple
def check_fit_params(
X: TwoDimArrayLikeType,
y: OneDimArrayLikeType,
sample_weight: Optional[OneDimArrayLikeType] = None,
estimator: Optional[BaseEstimator] = None,
**kwargs: Any
) -> Tuple[TwoDimArrayLikeType, OneDimArrayLikeType, OneDimArrayLikeType]:
"""Check `X`, `y` and `sample_weight`.
Parameters
----------
X
Data.
y
Target.
sample_weight
Weights of data.
estimator
Object to use to fit the data.
**kwargs
Other keywords passed to `sklearn.utils.check_array`.
Returns
-------
X
Converted and validated data.
y
Converted and validated target.
sample_weight
Converted and validated weights of data.
"""
X = check_X(X, estimator=estimator, **kwargs)
if not isinstance(y, pd.Series):
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if is_classifier(estimator):
check_classification_targets(y)
if sample_weight is None:
n_samples = _num_samples(X)
sample_weight = np.ones(n_samples)
sample_weight = np.asarray(sample_weight)
class_weight = getattr(estimator, "class_weight", None)
if class_weight is not None:
sample_weight *= compute_sample_weight(class_weight, y)
check_consistent_length(X, y, sample_weight)
return X, y, sample_weight | f385fb9db06d1bd0ee0a9ecef74fbca8f0754a4d | 5,657 |
async def example_quest(request: web.Request) -> web.Response:
"""
Example quest handler that handles a POST request with a computer science trivia question
:param request: The request object
"""
# Verify that it is a POST request, since that's what this quest is supposed to handle
if request.method == 'POST':
# We will always get JSON from the server, so convert it to a Python dict
data = await request.json()
# Let's see what the server is asking
print(f'Server sent POST to /my-simple-quest:', data)
# Ok so we know that the question is "Who invented C++?"
# The request always contains a "msg" field, and the response always expects an "answer" field
response = { 'answer': 'bjarne stroustrup' }
# The server always expects a JSON response
return web.json_response(response)
    else:
        Log.error('This quest is supposed to handle POST requests')
        # aiohttp handlers must return a response; answer non-POST requests with 405
        return web.Response(status=405, text='Method Not Allowed') | fed5ad72d9343c1fd420d98e638bf3f0de995670 | 5,658
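A hedged sketch of wiring the handler into an aiohttp application; the route path is taken
from the handler's own comments, and the port is an assumption:

from aiohttp import web

app = web.Application()
app.router.add_post('/my-simple-quest', example_quest)
# web.run_app(app, port=8080)  # assumed port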
def get_record_base_model(type_enum):
"""Return the dimension model class for a DimensionType."""
dim_model = _DIMENSION_TO_MODEL.get(type_enum)
if dim_model is None:
raise DSGInvalidDimension(f"no mapping for {type_enum}")
return dim_model | dc232a173ea92bcb6ee2bafcd9eeae46862da5ec | 5,660 |
from io import StringIO
def sas_to_pandas(sas_code, wrds_id, fpath):
"""Function that runs SAS code on WRDS or local server
and returns a Pandas data frame."""
p = get_process(sas_code, wrds_id, fpath)
if wrds_id:
df = pd.read_csv(StringIO(p.read().decode('utf-8')))
else:
df = pd.read_csv(StringIO(p.read()))
df.columns = map(str.lower, df.columns)
p.close()
    return df | 50806526e1f44e58c227472ced3b5884d8b9f2d5 | 5,662
def _client_ip_address(request):
"""Return client ip address for flask `request`.
"""
if request.headers.getlist("X-PNG-Query-For"):
ip_addr = request.headers.getlist("X-PNG-Query-For")[0]
if ip_addr.startswith('::ffff:'):
ip_addr = ip_addr[7:]
elif request.headers.getlist("X-Forwarded-For"):
ip_addr = request.headers.getlist("X-Forwarded-For")[0]
if ip_addr.startswith('::ffff:'):
ip_addr = ip_addr[7:]
else:
ip_addr = request.remote_addr
return ip_addr | eb1b41ee707bac5aefaacdbb1958cd26a47fe288 | 5,664 |
import numpy as np

def create_bb_points(vehicle):
"""
Extract the eight vertices of the bounding box from the vehicle.
Parameters
----------
vehicle : opencda object
Opencda ObstacleVehicle that has attributes.
Returns
-------
bbx : np.ndarray
3d bounding box, shape:(8, 4).
"""
bbx = np.zeros((8, 4))
extent = vehicle.bounding_box.extent
bbx[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
bbx[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
bbx[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
bbx[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
bbx[4, :] = np.array([extent.x, extent.y, extent.z, 1])
bbx[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
bbx[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
bbx[7, :] = np.array([extent.x, -extent.y, extent.z, 1])
return bbx | 2cf2e2b1e9d64a246369ff9b182199ed64fb71b9 | 5,665 |
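The function only reads `vehicle.bounding_box.extent.{x,y,z}`, so a lightweight stand-in
object is enough to try it outside CARLA; the stand-in below is illustrative, not an
OpenCDA type:

from types import SimpleNamespace

fake_vehicle = SimpleNamespace(
    bounding_box=SimpleNamespace(extent=SimpleNamespace(x=2.0, y=1.0, z=0.75)))
print(create_bb_points(fake_vehicle).shape)  # -> (8, 4)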
def featuredrep_set_groups(sender, **kwargs):
"""Set permissions to groups."""
app_label = sender.label
if (isinstance(app_label, basestring) and app_label != 'featuredrep'):
return True
perms = {'can_edit_featured': ['Admin', 'Council', 'Peers'],
'can_delete_featured': ['Admin', 'Council', 'Peers']}
add_permissions_to_groups('featuredrep', perms) | 354731d88ed6633d6cd62b73c6d1ea4cae97ca73 | 5,667 |
def download(loc, rem):
"""download rem to loc"""
# does the remote file exist
if not rem.exists():
return ReturnCode.NO_SOURCE
# does the local file exist
# if it doesnt, copy rem to loc, isLogged = False
if not loc.is_file():
return do_copy(rem, loc, False)
# is the local file older than remote
if not is_older_than(loc, rem):
return ReturnCode.NOT_OLDER
if outs.question_override(rem, loc):
return do_copy(rem, loc, False)
else:
return ReturnCode.USER_CANCEL | ff7421674f97a6923bbee6fa9be27d132d0095e3 | 5,668 |
from utils import publish_event
def read_dict(conf_dict=None, filename="SWIM_config"):
    """
    Open and read a dictionary of key-value pairs from the file given by
    filename. Use the read-in values to augment or update the dictionary passed
    in, then return the new dictionary.
    """
    if conf_dict is None:  # avoid the mutable-default-argument pitfall
        conf_dict = {}
try:
config_file = open(filename, "r")
if config_file:
line = config_file.readline().strip()
else:
line = ""
except:
message = "Unable to open config file " + filename
publish_event(message, topic = FSP_log, action = "halt_run")
print message
raise IOError, "Unable to open config file in read_dict"
try:
while line:
name, val = line.split("=")
name = name.strip()
val = val.strip()
conf_dict[name] = val
if config_file:
line = config_file.readline().strip()
else:
line = ""
config_file.close()
return conf_dict
except Exception, ex:
print "Unable to augment conf_dict in read_dict: %s" % ex
raise IOError, "Unable to augment conf_dict in read_dict" | 655699cf8c0c007c8e66f15a75bf778686c7d8d9 | 5,669 |
def ordinals_to_ordinals(s):
""" Example:
'third' -> '3rd'
Up to 31st (intended for dates)
"""
for val in ordinals.keys():
s = s.replace(val, ordinals[val])
return s | 4d45a9cfa0171a42deaf99d2e34e41dd5be6c96c | 5,670 |
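The `ordinals` mapping is defined elsewhere in the source module; a minimal sketch of what
it presumably contains, followed by a usage example:

ordinals = {"first": "1st", "second": "2nd", "third": "3rd"}  # truncated sketch

print(ordinals_to_ordinals("third of May"))  # -> '3rd of May'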
import pandas as pd

def create_dataframe_schema():
"""
Create dataframe schema
"""
return pd.DataFrame(columns=['Station_id', 'Name']) | 9f15aa5fb72716e0e398554caabafb972261e5ca | 5,671 |
import requests
def shutdown_check_handler():
"""This checks the AWS instance data URL to see if there's a pending
shutdown for the instance.
This is useful for AWS spot instances. If there is a pending shutdown posted
to the instance data URL, we'll use the result of this function break out of
the processing loop and shut everything down ASAP before the instance dies.
Returns
-------
bool
- True if the instance is going to die soon.
- False if the instance is still safe.
"""
    url = 'http://169.254.169.254/latest/meta-data/spot/instance-action'
    resp = None
    try:
resp = requests.get(url, timeout=1.0)
resp.raise_for_status()
stopinfo = resp.json()
if 'action' in stopinfo and stopinfo['action'] in ('stop',
'terminate',
'hibernate'):
stoptime = stopinfo['time']
LOGWARNING('instance is going to %s at %s' % (stopinfo['action'],
stoptime))
resp.close()
return True
else:
resp.close()
return False
    except requests.HTTPError:
resp.close()
return False
    except Exception:
        # resp may never have been assigned if requests.get itself failed
        if resp is not None:
            resp.close()
return False | dd5a7c3b3ab856d72afe01a19b2389071c4e70f3 | 5,672 |
def cal_d(date=cal_date.today(), zf=True):
"""
    Day of the month, optionally left-padded with zeroes (default: pad)
"""
day_num = "d" if zf else "-d" # optionally zero fill
return date.strftime(f"%{day_num}") | c0501449035c10f3c4b05a8f9088b30d5789f662 | 5,673 |
from typing import Mapping
from typing import Sequence
from typing import Tuple
def get_max_total(map_of_maps: Mapping[Sequence[str], Mapping[Tuple, float]]) -> float:
"""
>>> df = get_sample_df()
>>> get_max_total(calculate_kls_for_attackers(df, [1]))
1.3861419037664793
>>> get_max_total(calculate_kls_for_attackers(df))
3.0817041659455104
"""
return max(get_map_of_totals(map_of_maps).values()) | 6480676c3960e36e434dd8b229ddbd840fbcaa7a | 5,674 |
import requests
def get_keeper_token(host: str, username: str, password: str) -> str:
"""Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token.
"""
token_endpoint = urljoin(host, "/token")
r = requests.get(token_endpoint, auth=(username, password))
if r.status_code != 200:
raise KeeperError(
"Could not authenticate to {0}: error {1:d}\n{2}".format(
host, r.status_code, r.json()
)
)
return r.json()["token"] | 4c1feb095b3409786c5bac62aed41939f31a1431 | 5,675 |
import json
def edit_comment(post_id, comment_id):
"""Edit a comment from a specific post"""
post = posts.get(post_id)
if not post:
return json.dumps({"error": "Post Not Found"}), 404
comments = post["comments"]
comment = comments.get(comment_id)
if not comment:
return json.dumps({"error": "Comment Not Found"}), 404
body = json.loads(request.data)
text = body.get("text")
if not text:
return json.dumps({"error": "Missing fields in the body"}), 400
comment["text"] = text
return json.dumps(comment), 200 | dba79ed0bbdbc48b804a5a9f446c289f47606a75 | 5,676 |
def update_transition_dirichlet(
pB, B, actions, qs, qs_prev, lr=1.0, return_numpy=True, factors="all"
):
"""
Update Dirichlet parameters that parameterize the transition model of the generative model
(describing the probabilistic mapping between hidden states over time).
Parameters
-----------
- pB [numpy nd.array, array-of-arrays (with np.ndarray entries), or Dirichlet (either single-modality or AoA)]:
The prior Dirichlet parameters of the generative model, parameterizing the agent's beliefs about the transition likelihood.
- B [numpy nd.array, object-like array of arrays, or Categorical (either single-modality or AoA)]:
The transition likelihood of the generative model.
- actions [tuple]:
A tuple containing the action(s) performed at a given timestep.
    - qs [numpy 1D array, array-of-arrays (where each entry is a numpy 1D array), or Categorical (either single-factor or AoA)]:
        Current marginal posterior beliefs about hidden state factors
    - qs_prev [numpy 1D array, array-of-arrays (where each entry is a numpy 1D array), or Categorical (either single-factor or AoA)]:
        Past marginal posterior beliefs about hidden state factors
    - lr [float, optional]:
        Learning rate.
    - return_numpy [bool, optional]:
        Logical flag to determine whether output is a numpy array or a Dirichlet
    - factors [list, optional]:
Indices (in terms of range(Nf)) of the hidden state factors to include in learning.
Defaults to 'all', meaning that transition likelihood matrices for all hidden state factors
are updated as a function of transitions in the different control factors (i.e. actions)
"""
pB = utils.to_numpy(pB)
if utils.is_arr_of_arr(pB):
n_factors = len(pB)
else:
n_factors = 1
if return_numpy:
pB_updated = pB.copy()
else:
pB_updated = utils.to_dirichlet(pB.copy())
if not utils.is_distribution(qs):
qs = utils.to_categorical(qs)
if factors == "all":
if n_factors == 1:
db = qs.cross(qs_prev, return_numpy=True)
db = db * (B[:, :, actions[0]] > 0).astype("float")
pB_updated = pB_updated + (lr * db)
elif n_factors > 1:
for f in range(n_factors):
db = qs[f].cross(qs_prev[f], return_numpy=True)
db = db * (B[f][:, :, actions[f]] > 0).astype("float")
pB_updated[f] = pB_updated[f] + (lr * db)
else:
for f_idx in factors:
db = qs[f_idx].cross(qs_prev[f_idx], return_numpy=True)
db = db * (B[f_idx][:, :, actions[f_idx]] > 0).astype("float")
pB_updated[f_idx] = pB_updated[f_idx] + (lr * db)
return pB_updated | 3b41320f20abee2cec4cfa651d932a388c4595c2 | 5,677 |
def MRP2Euler231(q):
"""
MRP2Euler231(Q)
E = MRP2Euler231(Q) translates the MRP
vector Q into the (2-3-1) euler angle vector E.
"""
return EP2Euler231(MRP2EP(q)) | 09929b4858eb0f8a755b623fa86b6d77333e9f6b | 5,678 |
def get_entry_or_none(base: dict, target, var_type=None):
"""Helper function that returns an entry or None if key is missing.
:param base: dictionary to query.
:param target: target key.
:param var_type: Type of variable this is supposed to be (for casting).
:return: entry or None.
"""
if target not in base:
return None
if var_type is not None:
return var_type(base[target])
return base[target] | b3855be0c7d2c3bdd42e57ae959bb97409abe828 | 5,681 |
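Usage example:

cfg = {"port": "8080"}
print(get_entry_or_none(cfg, "port", int))  # -> 8080
print(get_entry_or_none(cfg, "host"))       # -> None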
def group_list(request):
"""
    List all groups, or create a new group.
"""
if request.method == 'GET':
tasks = Group.objects.all()
serializer = GroupSerializer(tasks, many=True)
return Response(serializer.data)
elif request.method == 'POST':
unique_name = request.data.get("unique_name")
display_name = request.data.get("display_name")
if unique_name and display_name:
checkgoup = Group.objects.filter(unique_name=unique_name).first()
if checkgoup:
res = {"code": 400,
"message": "Ops!, Unique name already exists"}
return Response(data=res,
status=400)
else:
res = {"code": 400,
"message":
"Ops!, Unique name and display name can't be null"}
return Response(data=res,
status=400)
group = Group.create(unique_name, display_name)
group.save()
serializer = GroupSerializer(group, many=False)
return JsonResponse(serializer.data, safe=False) | 39e3d67aa88008541898aea2d21d5a811ec17699 | 5,682 |
def CreateBooleanSplit(meshesToSplit, meshSplitters, multiple=False):
"""
Splits a set of meshes with another set.
Args:
meshesToSplit (IEnumerable<Mesh>): A list, an array, or any enumerable set of meshes to be split. If this is null, None will be returned.
meshSplitters (IEnumerable<Mesh>): A list, an array, or any enumerable set of meshes that cut. If this is null, None will be returned.
Returns:
Mesh[]: A new mesh array, or None on error.
"""
url = "rhino/geometry/mesh/createbooleansplit-mesharray_mesharray"
if multiple: url += "?multiple=true"
args = [meshesToSplit, meshSplitters]
if multiple: args = list(zip(meshesToSplit, meshSplitters))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | 56f8956b9cce7bd9467ac23b80a7573a889c05bf | 5,683 |
def template14():
"""Simple ML workflow"""
script = """
## (Enter,datasets)
<< host = chemml
<< function = load_cep_homo
>> smiles 0
>> homo 4
## (Store,file)
<< host = chemml
<< function = SaveFile
<< format = smi
<< header = False
<< filename = smiles
>> 0 df
>> filepath 1
## (Represent,molecular descriptors)
<< host = chemml
<< function = RDKitFingerprint
>> 1 molfile
>> df 2
>> df 3
## (Store,file)
<< host = chemml
<< function = SaveFile
<< filename = fps_rdkfp
>> 2 df
## (Prepare,split)
<< host = sklearn
<< function = train_test_split
>> 3 dfx
>> 4 dfy
>> dfx_train 5
>> dfy_train 6
>> dfx_test 8
>> dfy_test 11
## (Model,regression)
<< host = sklearn
<< function = MLPRegressor
<< func_method = fit
>> 5 dfx
>> 6 dfy
>> api 7
## (Model,regression)
<< host = sklearn
<< function = MLPRegressor
<< func_method = predict
>> 7 api
>> 8 dfx
>> dfy_predict 9
>> dfy_predict 10
## (Store,file)
<< host = chemml
<< function = SaveFile
<< filename = dfy_predict
>> 9 df
## (Visualize,plot)
<< host = chemml
<< function = scatter2D
<< x = 0
<< y = 0
>> 10 dfx
>> 11 dfy
>> fig 12
## (Store,figure)
<< host = chemml
<< function = SavePlot
<< filename = dfy_actual_vs_dfy_predict
<< output_directory = .
>> 13 fig
## (Visualize,artist)
<< host = chemml
<< function = decorator
<< title = true vs. predicted HOMO energy
<< xlabel = predicted HOMO energy (eV)
<< ylabel = true HOMO energy (eV)
<< grid = True
<< grid_color = g
<< size = 18
>> 12 fig
>> fig 13
"""
return script.strip().split('\n') | d321d2016f0894d0a0538a09f6bc17f3f690317b | 5,684 |
def get_group_selector(*args):
"""
get_group_selector(grpsel) -> sel_t
Get common selector for a group of segments.
@param grpsel: selector of group segment (C++: sel_t)
@return: common selector of the group or 'grpsel' if no such group is
found
"""
return _ida_segment.get_group_selector(*args) | 6b750702186d70f2b21b64c13145d8da7bfd0b9c | 5,685 |
import textwrap
def wrap(text=cert_text) -> str:
"""Wraps the given text using '\n' to fit the desired width."""
wrapped_text = textwrap.fill(text, fit_char())
return wrapped_text | a6e42a7ca8fa78e7be89e31a920e8b3baa95245e | 5,686 |
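`cert_text` and `fit_char()` are module globals defined elsewhere; stand-ins for
illustration (note they would need to exist before the def above is executed, since
`cert_text` is evaluated as a default argument):

cert_text = "some long certificate text " * 4  # stand-in default text

def fit_char() -> int:  # stand-in for the module's width helper
    return 40

print(wrap(cert_text))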
import simplejson

def encode(data):
"""calls simplejson's encoding stuff with our needs"""
return simplejson.dumps(
data,
cls=CahootsEncoder,
ensure_ascii=False,
encoding='utf8',
indent=4
) | d3577e87b830b17222614d978d78eaa8329843bd | 5,687 |
def SE_HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs):
"""
SE_HRNet_W48_C
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `SE_HRNet_W48_C` model depends on args.
"""
model = HRNet(width=48, has_se=True, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W48_C"], use_ssld)
return model | 0204ff852a18e6319c0454c5fd81d748626fcf78 | 5,688 |
from typing import Union
from typing import Any
from typing import List
def manifest(argument: Union[Any, List[Any]], data: bytearray) -> Union[Any, List[Any]]:
"""
Returns the manifestation of a `refinery.lib.argformats.LazyEvaluation`
on the given data. This function can change the data.
"""
if isinstance(argument, (list, tuple)):
return [manifest(x, data) for x in argument]
return argument(data) if isinstance(argument, LazyEvaluation) else argument | b8c5335494fda972c09a6d1937344783fb91ea80 | 5,689 |
def get_child_hwnd_by_class(hwnd: int, window_class: str) -> int:
"""Enumerates the child windows that belong to the specified parent window by passing the handle to
each child window.
:param hwnd: HWND in decimal
:param window_class: window class name
:return: window handle (HWND)
"""
def callback(hwnd, extra):
if extra['equals'] == win32gui.GetClassName(hwnd):
extra['res'] = hwnd
extra = {'res': None, 'equals': window_class}
win32gui.EnumChildWindows(hwnd, callback, extra)
return extra['res'] | 0d6b7cb56b483c88305611520e9787ed4321b6ac | 5,690 |