content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def create_task_dialog(request):
"""called when creating tasks
"""
return data_dialog(request, mode='create', entity_type='Task') | 6712048914a8417792b0dc8a1ab60c081886d4fa | 19,279 |
def raw_rearrange(da, pattern, **kwargs):
"""Crudely wrap `einops.rearrange <https://einops.rocks/api/rearrange/>`_.
Wrapper around einops.rearrange with a very similar syntax.
    Spaces, parentheses ``()`` and ``->`` are not allowed in dimension names.
Parameters
----------
da : xarray.DataArray
Input array
pattern : string
Pattern string. Same syntax as patterns in einops with two
caveats:
* Unless splitting or stacking, you must use the actual dimension names.
        * When splitting or stacking you can use ``(dim1 dim2)=dim``. This is *necessary*
          for the left hand side as it identifies the dimension to split, and
          optional on the right hand side; if omitted, the stacked dimension will be
          given a default name.
kwargs : dict, optional
Passed to :func:`xarray_einstats.einops.rearrange`
Returns
-------
xarray.DataArray
See Also
--------
xarray_einstats.einops.rearrange:
More flexible and powerful wrapper over einops.rearrange. It is also more verbose.
"""
if "->" in pattern:
in_pattern, out_pattern = pattern.split("->")
in_dims = translate_pattern(in_pattern)
else:
out_pattern = pattern
in_dims = None
out_dims = translate_pattern(out_pattern)
return rearrange(da, out_dims=out_dims, in_dims=in_dims, **kwargs) | a16c8e439882acba930fa143e9d2428d38d1ca70 | 19,280 |
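A minimal usage sketch for the wrapper above. The import path (xarray_einstats.einops) follows the docstring's cross-references and the dimension names are illustrative only; treat both as assumptions.

import numpy as np
import xarray as xr
from xarray_einstats.einops import raw_rearrange  # assumed import path

da = xr.DataArray(np.arange(24).reshape(2, 3, 4), dims=("batch", "drow", "dcol"))
# Stack "drow" and "dcol" into a single dimension named "pixel" (right-hand-side-only pattern).
stacked = raw_rearrange(da, "(drow dcol)=pixel")
print(stacked.dims)  # expected to contain "batch" and the new "pixel" dimension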
from typing import Tuple
def get_users() -> Tuple[int, ...]:
"""Count user ids in db."""
db = get_database_connection()
user_searches = db.keys(pattern=f'{DB_SEARCH_PREFIX}*')
    user_ids = [
        # str.lstrip would strip any characters in the prefix *set* (possibly eating
        # leading digits of the id), so slice off the prefix length instead.
        int(user_search.decode('utf-8')[len(DB_SEARCH_PREFIX):])
        for user_search in user_searches
    ]
return tuple(user_ids) | 2eaded42444fe4ad5395387ddd45022a9e8736ce | 19,281 |
from typing import Optional
async def login(
email: str,
password: str,
session: Optional[ClientSession] = None,
*,
conf_update_interval: Optional[timedelta] = None,
device_set_debounce: Optional[timedelta] = None,
):
"""Login using email and password."""
if session:
response = await _do_login(session, email, password, headers=_headers(""))
else:
async with ClientSession() as _session:
response = await _do_login(_session, email, password)
return Client(
response.get("userunits", '0'),
session,
conf_update_interval=conf_update_interval,
device_set_debounce=device_set_debounce,
) | 274f0785eb0e2fb3b73cfc4c2810df03df52d7b1 | 19,282 |
def rcomp_prediction(system, rcomp, predargs, init_cond):
""" Make a prediction with the given system
Parameters:
system (str): Name of the system to predict
rcomp (ResComp): Trained reservoir computer
predargs (variable length arguments): Passed directly into rcomp.predict
        init_cond (dict): Keyword args passed to rcomp.predict
Returns:
pre (ndarray): Reservoir computer prediction
"""
if system == "softrobot":
pre = rcomp.predict(*predargs, **init_cond)
else:
pre = rcomp.predict(predargs, **init_cond)
return pre | fb4eb3e710335788333a12abcd494015f4784a78 | 19,283 |
def get_best_model(X, y):
"""Select best model from RandomForestClassifier and AdaBoostClassifier"""
ensembles = [
(RandomForestClassifier, SelectParam({
'estimator': RandomForestClassifier(warm_start=True, random_state=7),
'param_grid': {
'n_estimators': [10, 15, 20],
'criterion': ['gini', 'entropy'],
'max_features': [FEATURE_NUM+n for n in [-4, -2, 0]],
'max_depth': [10, 15],
'bootstrap': [True],
'warm_start': [True],
},
'n_jobs':1
})),
(AdaBoostClassifier, SelectParam({
'estimator': AdaBoostClassifier(random_state=7),
'param_grid': {
'algorithm': ['SAMME', 'SAMME.R'],
'n_estimators': [10, 15, 20],
'learning_rate': [1e-3, 1e-2, 1e-1]
},
'n_jobs': 1
}))
]
best_score = 0
best_model = None
for ensemble, select in ensembles:
param = select.get_param(X, y)
model = ensemble(**param)
score = cross_val_score(model, X, y).mean()
if best_score < score:
best_score = score
best_model = model
return best_model | c1ac787d89a0086c263490b12081e0bfe98c6c57 | 19,284 |
def compareDates(dateA: list, dateB: list) -> int:
"""
Compares dateA and dateB\n
returns: 1 if dateA > dateB,\n
-1 if dateA <dateB,
0 if dateA == dateB \n
raise Exception if dates are invalid
"""
if not checkDateValidity(dateA, dateB):
raise invalidDateException('Invalid Dates')
i = 2
while i >= 0:
if dateA[i] < dateB[i]:
return -1
elif dateA[i] > dateB[i]:
return 1
else:
if i == 0:
return 0
i -= 1 | 927af2e0164706e8013badd90638b2561ab74241 | 19,285 |
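A quick usage sketch for the comparison above; it assumes dates are [day, month, year] lists (index 2, the year, is compared first) and that checkDateValidity and invalidDateException are defined elsewhere in the module.

print(compareDates([15, 3, 2021], [1, 7, 2020]))  # 1  -> first date is later
print(compareDates([1, 1, 2020], [1, 1, 2020]))   # 0  -> dates are equal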
def renderPage(res, topLevelContext=context.WebContext,
reqFactory=FakeRequest):
"""
Render the given resource. Return a Deferred which fires when it has
rendered.
"""
req = reqFactory()
ctx = topLevelContext(tag=res)
ctx.remember(req, inevow.IRequest)
render = appserver.NevowRequest(None, True).gotPageContext
result = render(ctx)
result.addCallback(lambda x: req.accumulator)
return result | 136a06274c9cbb34951a7d2b8544328b4a1f4b60 | 19,286 |
def get_header_value(headers, name, default=None):
""" Return header value, doing case-insensitive match """
if not headers:
return default
if isinstance(headers, dict):
headers = headers.items()
name = to_bytes(name.lower())
for k, v in headers:
if name == to_bytes(k.lower()):
return v
return default | 9ddb9754061554bd59b429b78e472bd514e4c14d | 19,287 |
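A small illustrative check of the case-insensitive lookup above, assuming the module's to_bytes helper encodes str to bytes (e.g. UTF-8).

headers = {b"Content-Type": b"application/json", b"X-Token": b"abc"}
print(get_header_value(headers, "content-type"))           # b'application/json'
print(get_header_value(headers, "missing", default=b""))   # b''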
def parse_gt_from_anno(img_anno, classes):
"""parse_gt_from_anno"""
print('parse ground truth files...')
ground_truth = {}
for img_name, annos in img_anno.items():
objs = []
for anno in annos:
if anno[1] == 0. and anno[2] == 0. and anno[3] == 0. and anno[4] == 0.:
continue
if int(anno[0]) == -1:
continue
xmin = anno[1]
ymin = anno[2]
xmax = xmin + anno[3] - 1
ymax = ymin + anno[4] - 1
xmin = int(xmin)
ymin = int(ymin)
xmax = int(xmax)
ymax = int(ymax)
cls = classes[int(anno[0])]
gt_box = {'class': cls, 'box': [xmin, ymin, xmax, ymax]}
objs.append(gt_box)
ground_truth[img_name] = objs
return ground_truth | 63ba02bb0511cdc02245528041257639e764605f | 19,288 |
def pt_to_tup(pt):
"""
Convenience method to generate a pair of two ints from a tuple or list.
Parameters
----------
pt : list OR tuple
Can be a list or a tuple of >=2 elements as floats or ints.
Returns
-------
pt : tuple of int
A pair of two ints.
"""
    return (int(pt[0]), int(pt[1])) | 7013b2477959f528b98d364e4cc44ac8700fb366 | 19,289 |
def _operation(m1, m2, op, k):
"""Generalized function for basic"""
"""matrix operations"""
n = len(m1)
res = [n*[0] for i in range(n)]
if n == len(m2):
for i in range(n):
for j in range(n):
tab = {
"+" : m1[i][j]+m2[i][j],
"-" : m1[i][j]-m2[i][j],
"*s": m1[i][j]*k,
}
res[i][j] = tab[op]
return res | 5e00ad1a9fbadb9712631450b106b81e5a3413ed | 19,290 |
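A quick sanity check of the helper with two 2x2 matrices; "+" and "-" are element-wise, while "*s" scales m1 by k.

m1 = [[1, 2], [3, 4]]
m2 = [[5, 6], [7, 8]]
print(_operation(m1, m2, "+", 0))   # [[6, 8], [10, 12]]
print(_operation(m1, m2, "*s", 3))  # [[3, 6], [9, 12]]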
def jacobi_d1(x, n, alpha, beta):
"""Evaluate the first derivative of Jacobi polynomial at x using eq. A.1.8
Args:
x: the location where the value will be evaluated
n: the order of Jacobi polynomial
alpha: the alpha parameter of Jacobi polynomial
beta: the beta parameter of Jacobi polynomial
Returns:
the first derivative of Jacobi polynomial at x
Raises:
None
"""
jacobi_check(n, alpha, beta)
if n == 0:
return 0.
else:
return 0.5 * (alpha + beta + n + 1) * \
jacobi_r(x, n - 1, alpha + 1, beta + 1) | 4a982827916466fad0ed812d2ec3792ca1605f0a | 19,291 |
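The identity the function evaluates (eq. A.1.8 in the referenced text) can be written as

\frac{d}{dx} P_n^{(\alpha,\beta)}(x) = \frac{1}{2}\,(\alpha + \beta + n + 1)\, P_{n-1}^{(\alpha+1,\beta+1)}(x)

which is exactly the 0.5 * (alpha + beta + n + 1) * jacobi_r(x, n - 1, alpha + 1, beta + 1) expression above.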
def gate_expand_1toN(U, N, target):
"""
    Create a Qobj representing a one-qubit gate that acts on a system with N
qubits.
Parameters
----------
U : Qobj
The one-qubit gate
N : integer
The number of qubits in the target space.
target : integer
The index of the target qubit.
Returns
-------
gate : qobj
Quantum object representation of N-qubit gate.
"""
if N < 1:
raise ValueError("integer N must be larger or equal to 1")
if target >= N:
raise ValueError("target must be integer < integer N")
return tensor([identity(2)] * (target) + [U] +
[identity(2)] * (N - target - 1)) | efb3d4da51e2f6dc90ba7bcf5e085cb651a4fed0 | 19,292 |
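A hedged usage sketch assuming QuTiP is the source of identity, tensor and sigmax (as the Qobj-based docstring suggests).

from qutip import sigmax

# Embed a Pauli-X acting on qubit 1 of a 3-qubit register, i.e. I (x) X (x) I.
U3 = gate_expand_1toN(sigmax(), N=3, target=1)
print(U3.dims)  # [[2, 2, 2], [2, 2, 2]]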
from typing import Dict
from typing import Any
from typing import List
def build_component_dependency_graph(
pipeline_definition: Dict[str, Any], component_definitions: Dict[str, Any]
) -> DiGraph:
"""
Builds a dependency graph between components. Dependencies are:
- referenced components during component build time (e.g. init params)
- predecessor components in the pipeline that produce the needed input
This enables sorting the components in a working and meaningful order for instantiation using topological sorting.
:param pipeline_definition: the definition of the pipeline (e.g. use get_pipeline_definition() to obtain it)
:param component_definitions: the definition of the pipeline components (e.g. use get_component_definitions() to obtain it)
"""
graph = DiGraph()
for component_name, component_definition in component_definitions.items():
params = component_definition.get("params", {})
referenced_components: List[str] = list()
for param_value in params.values():
# Currently we don't do any additional type validation here.
# See https://github.com/deepset-ai/haystack/pull/2253#discussion_r815951591.
if param_value in component_definitions:
referenced_components.append(param_value)
for referenced_component in referenced_components:
graph.add_edge(referenced_component, component_name)
for node in pipeline_definition["nodes"]:
node_name = node["name"]
graph.add_node(node_name)
for input in node["inputs"]:
if input in component_definitions:
# Special case for (actually permitted) cyclic dependencies between two components:
# e.g. DensePassageRetriever depends on ElasticsearchDocumentStore.
# In indexing pipelines ElasticsearchDocumentStore depends on DensePassageRetriever's output.
# But this second dependency is looser, so we neglect it.
if not graph.has_edge(node_name, input):
graph.add_edge(input, node_name)
return graph | c70655e4d2b2405d991af43d4a1ad67eb1d8c9d3 | 19,293 |
def count_distribution_artefacts(distribution_artefacts):
"""
Count distribution artefacts in nested list.
:param distribution_artefacts: Nested list containing distribution artefacts mapped to media packages and tenants
:type distribution_artefacts: dict
:return: Amount of distribution artefacts
:rtype: int
"""
return sum([sum([len(distribution_artefacts[tenant][media_package]) for media_package in
distribution_artefacts[tenant].keys()]) for tenant in distribution_artefacts.keys()]) | b9bc159523e8cbb4745d8b7e8897360f6f9c1960 | 19,294 |
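The expected input shape, illustrated: a mapping of tenant -> media package -> list of artefacts.

distribution_artefacts = {
    "tenant_a": {"mp1": ["a1", "a2"], "mp2": ["a3"]},
    "tenant_b": {"mp3": []},
}
print(count_distribution_artefacts(distribution_artefacts))  # 3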
def nelson_siegel_yield(tau, theta):
"""For details, see here.
Parameters
----------
tau : array, shape (n_,)
theta : array, shape (4,)
Returns
-------
y : array, shape (n_,)
"""
y = theta[0] - theta[1] * \
((1 - np.exp(-theta[3] * tau)) /
(theta[3] * tau)) + theta[2] * \
((1 - np.exp(-theta[3] * tau)) /
(theta[3] * tau) - np.exp(-theta[3] * tau))
return np.squeeze(y) | ba328c7698f088c3e371462b6a92c62517054af5 | 19,295 |
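Written out, the curve evaluated above is the Nelson-Siegel form with this implementation's sign convention for the slope term:

y(\tau) = \theta_0 - \theta_1 \frac{1 - e^{-\theta_3 \tau}}{\theta_3 \tau} + \theta_2 \left( \frac{1 - e^{-\theta_3 \tau}}{\theta_3 \tau} - e^{-\theta_3 \tau} \right)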
def fix_filename(filename):
"""Replace illegal or problematic characters from a filename."""
return filename.translate(_filename_trans) | dc8c8e1f85a7372db97273ae595ff69520824574 | 19,297 |
def QDenseModel(weights_f, load_weights=False):
"""Construct QDenseModel."""
x = x_in = Input((RESHAPED,), name="input")
x = QActivation("quantized_relu(4)", name="act_i")(x)
x = QDense(N_HIDDEN, kernel_quantizer=ternary(),
bias_quantizer=quantized_bits(4, 0, 1), name="dense0")(x)
x = QActivation("quantized_relu(2)", name="act0")(x)
x = QDense(
NB_CLASSES,
kernel_quantizer=quantized_bits(4, 0, 1),
bias_quantizer=quantized_bits(4, 0, 1),
name="dense2")(
x)
x = Activation("softmax", name="softmax")(x)
model = Model(inputs=[x_in], outputs=[x])
model.summary()
model.compile(loss="categorical_crossentropy",
optimizer=OPTIMIZER, metrics=["accuracy"])
if load_weights and weights_f:
model.load_weights(weights_f)
print_qstats(model)
return model | c84f591866708ea086e8c22ff333d60381e1f865 | 19,298 |
def fmt_uncertainty(x, dx, sn=None, sn_cutoff=8, unit=None):
"""Format uncertainty for latex."""
n_decimals = -int(np.floor(np.log10(np.abs(dx))))
leading_magnitude = np.abs(dx)/10**-n_decimals
if leading_magnitude <= 1.5:
n_decimals += 1
if sn is None:
if np.abs(x) >= 10**sn_cutoff or np.abs(x) <= 10**-sn_cutoff:
sn = True
else:
sn = False
if sn:
exponent = int(np.floor(np.log10(np.abs(x))))
x_mag = np.abs(x)/10**exponent
dx_mag = np.abs(dx)/10**exponent
else:
exponent = 0
x_mag = np.abs(x)
dx_mag = np.abs(dx)
x_round = np.round(x_mag, decimals=n_decimals + exponent)
dx_round = np.round(dx_mag, decimals=n_decimals + exponent)
if dx_round > 1.5:
x_str = str(int(x_round))
else:
x_str = str(x_round)
dx_str = str(dx_round)
if sn:
fmt_str = r'(%s \pm %s)\times {10}^{%s}' % (
x_str, dx_str, exponent
)
if x < 0:
fmt_str = f'-{fmt_str}'
else:
fmt_str = r'%s \pm %s' % (x_str, dx_str)
if x < 0:
fmt_str = f'-({fmt_str})'
if unit is not None:
if '(' not in fmt_str:
fmt_str = f'({fmt_str})'
fmt_str += r'\ \mathrm{%s}' % unit
fmt_str = f'${fmt_str}$'
return fmt_str | bcfbd5f22adc6a6afac1a659afe62b893ad34f1b | 19,300 |
def seed_test_input(clusters, limit):
"""
Select the seed inputs for fairness testing
:param clusters: the results of K-means clustering
:param limit: the size of seed inputs wanted
:return: a sequence of seed inputs
"""
i = 0
rows = []
max_size = max([len(c[0]) for c in clusters])
while i < max_size:
if len(rows) == limit:
break
for c in clusters:
if i >= len(c[0]):
continue
row = c[0][i]
rows.append(row)
if len(rows) == limit:
break
i += 1
return np.array(rows) | 03462327a6554c966d58d1e1982b40bd396e88c9 | 19,301 |
import numpy
def calc_senescence_water_shading(
aglivc, bgwfunc, fsdeth_1, fsdeth_3, fsdeth_4):
"""Calculate shoot death due to water stress and shading.
In months where senescence is not scheduled to occur, some shoot death
may still occur due to water stress and shading.
Parameters:
aglivc (numpy.ndarray): state variable, carbon in aboveground live
biomass
bgwfunc (numpy.ndarray): derived, effect of soil moisture on
decomposition and shoot senescence
fsdeth_1 (numpy.ndarray): parameter, maximum shoot death rate at very
dry soil conditions
fsdeth_3 (numpy.ndarray): parameter, additional fraction of shoots
which die when aglivc is greater than fsdeth_4
fsdeth_4 (numpy.ndarray): parameter, threshold value for aglivc
above which shading increases senescence
Returns:
fdeth, fraction of aboveground live biomass that is converted to
standing dead
"""
valid_mask = (
(~numpy.isclose(aglivc, _SV_NODATA)) &
(bgwfunc != _TARGET_NODATA) &
(fsdeth_1 != _IC_NODATA) &
(fsdeth_3 != _IC_NODATA) &
(fsdeth_4 != _IC_NODATA))
fdeth = numpy.empty(aglivc.shape, dtype=numpy.float32)
fdeth[:] = _TARGET_NODATA
fdeth[valid_mask] = fsdeth_1[valid_mask] * (1. - bgwfunc[valid_mask])
shading_mask = ((aglivc > fsdeth_4) & valid_mask)
fdeth[shading_mask] = fdeth[shading_mask] + fsdeth_3[shading_mask]
fdeth[valid_mask] = numpy.minimum(fdeth[valid_mask], 1.)
return fdeth | d45fbeaa24138b46fd88caa815f5e15c4098a7e5 | 19,302 |
def hello(friend_name: str = None) -> str:
    """Greet the user: takes a string and returns "Hello, <string>!"."""
if not isinstance(friend_name, str):
raise TypeError("this function expects a string as input")
return f'Hello, {friend_name}!' | 9e5ab340fdeb1bed2cd48758bab8a0815ae6f75c | 19,303 |
def flatten(lst):
"""Shallow flatten *lst*"""
return [a for b in lst for a in b] | 203e971e43aea4d94bfa0ffa7057b416ef0bf545 | 19,304 |
def _transform_org_units(metadata: dict) -> pd.DataFrame:
"""Transform org units metadata into a formatted DataFrame."""
df = pd.DataFrame.from_dict(metadata.get("organisationUnits"))
df = df[["id", "code", "shortName", "name", "path", "geometry"]]
df.columns = ["ou_uid", "ou_code", "ou_shortname", "ou_name", "path", "geometry"]
df["ou_level"] = df.path.apply(lambda x: x.count("/"))
df = df[
["ou_uid", "ou_code", "ou_shortname", "ou_name", "ou_level", "path", "geometry"]
] # Reorder columns
return df | a90532e5d5b09aeb2a85addad518c54301710141 | 19,305 |
def post_process(done_exec, temp_file):
"""For renaissance, `temp_file` is a path to a CSV file into which the
results were written. For other suites, it is `None`."""
if done_exec.suite == "renaissance":
assert temp_file is not None
return post_process_renaissance(done_exec, temp_file)
elif done_exec.suite == "dacapo":
assert temp_file is None
return post_process_dacapo(done_exec)
elif done_exec.suite == "specjvm":
assert temp_file is None
return post_process_specjvm(done_exec)
else:
raise ValueError("unknown suite %s" % done_exec.suite) | 615098d2ddc3806984fbc448bca377933c87973f | 19,306 |
def freeze(regex_frozen_weights):
"""Creates an optimizer that set learning rate to 0. for some weights.
Args:
regex_frozen_weights: The regex that matches the (flatten) parameters
that should not be optimized.
Returns:
A chainable optimizer.
"""
return scale_selected_parameters(regex_frozen_weights, multiplier=0.) | bc4c511a1b6a03b1dd08c63d12fb5af55e177b62 | 19,307 |
def sep_num(number, space=True):
"""
    Creates a string representation of a number with a separator every thousand. If space is True, it uses spaces as
    the separator; otherwise it uses commas.
Note
----
Source: https://stackoverflow.com/questions/16670125/python-format-string-thousand-separator-with-spaces
:param number: A number
:type number: int | float
:param space: Separates numbers with spaces if True, else with commas
:type space: bool
:return: string representation with space separation
:rtype: str
"""
if space:
return '{:,}'.format(number).replace(',', ' ')
else:
return '{:,}'.format(number) | ee7dfbb60fb01bb7b6bb84cbe56ec50dfab4b339 | 19,309 |
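Quick examples of the separator behaviour described in the docstring above.

print(sep_num(1234567))                  # '1 234 567'
print(sep_num(1234567.5, space=False))   # '1,234,567.5'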
def raw_smooth_l1_loss(diff, delta=1.0, max_val=10.0):
"""
Creates smooth L1 loss. The regular version is sometimes unstable so here what we do is if the difference
is > some value, we will return the log instead.
So it's then
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if max_val > |x| > d
0.5 * d^2 + d * (max_val - d) + d*log(1.0+|x|-max_val) if |x| > max_val
:param diff:
:param delta:
:param max_val: It turns into log after here
:return:
"""
abs_diff = tf.abs(diff)
huber_loss = tf.where(
tf.math.less(abs_diff, delta),
0.5 * tf.square(diff),
0.5 * (delta ** 2) + delta * (abs_diff - delta),
)
huber_loss_capped = tf.where(
tf.math.less(abs_diff, max_val),
huber_loss,
0.5 * (delta ** 2) + delta * (max_val - delta) + delta * tf.math.log1p(tf.math.abs(abs_diff - max_val))
)
return huber_loss_capped | 0f82ebdb72d27c1dc5e961d6905d1488d56e46b2 | 19,310 |
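A hedged numerical check, assuming tf is TensorFlow 2.x; the three inputs exercise the three pieces of the piecewise definition above.

import tensorflow as tf

diff = tf.constant([0.5, 3.0, 20.0])
print(raw_smooth_l1_loss(diff, delta=1.0, max_val=10.0).numpy())
# approximately [0.125, 2.5, 11.9] -- the quadratic, linear and log regimes respectively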
def read_parameters(request, view_kwargs):
"""
:param request: HttpRequest with attached api_info
:type request: HttpRequest
:type view_kwargs: dict[str, object]
:rtype: dict[str, object]
"""
params = {}
errors = {}
for param in request.api_info.operation.parameters:
try:
value = get_parameter_value(request, view_kwargs, param)
except KeyError:
if 'default' in param:
params[param['name']] = param['default']
continue
if param.get('required'): # Required but missing
errors[param['name']] = MissingParameter('parameter %s is required but missing' % param['name'])
continue
try:
params[param['name']] = cast_parameter_value(request.api_info, param, value)
except NotImplementedError:
raise
except Exception as e:
errors[param['name']] = e
if errors:
raise ErroneousParameters(errors, params)
return params | fb21c7a01fa4902e9e5adcb676731ee324438226 | 19,312 |
import getpass
def get_passwd():
"""Prompt user for a password
Prompts user to enter and confirm a password. Raises an exception
if the password is deemed to be invalid (e.g. too short), or if
the password confirmation fails.
Returns:
Password string entered by the user.
"""
passwd = getpass.getpass("Enter password for new user: ")
if not validate_password(passwd):
raise Exception("Invalid password: must be 6 or more characters")
passwd2 = getpass.getpass("Confirm password: ")
if passwd2 != passwd:
raise Exception("Passwords don't match")
return passwd | 214f6078e07259eea55aa9bd6269cfc7742f7edc | 19,314 |
def _render_footnote_block_open(self, tokens, idx, options, env):
"""Render the footnote opening without the hr tag at the start."""
html = mdit_py_plugins.footnote.index.render_footnote_block_open(
self, tokens, idx, options, env
)
lines = html.split("\n")
if lines[0].strip().startswith("<hr"):
lines = lines[1:]
return "\n".join(lines) | 56c3e6bfaada2bf5c4ea1fa48ebe4e5cd53dbea0 | 19,315 |
def remove_shot_from_scene(scene, shot, client=default):
"""
Remove link between a shot and a scene.
"""
scene = normalize_model_parameter(scene)
shot = normalize_model_parameter(shot)
return raw.delete(
"data/scenes/%s/shots/%s" % (scene["id"], shot["id"]),
client=client
) | ca6db744a60be7ee287aa95ba647f53b6afff42e | 19,316 |
def all_results_failed(subsystems):
"""Check if all results have failed status"""
for subsystem in subsystems.values():
if subsystem['subsystemStatus'] == 'OK':
# Found non-failed subsystem
return False
# All results failed
return True | 6612397c5b1605ad3e623e3c47264869c93cd47d | 19,317 |
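An illustration of the expected input: a mapping of subsystem name to a result dict with a subsystemStatus field.

subsystems = {
    "db": {"subsystemStatus": "FAILED"},
    "cache": {"subsystemStatus": "OK"},
}
print(all_results_failed(subsystems))  # False, because the cache subsystem is OK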
from src.praxxis.sqlite import connection
def get_scene_id(current_scene_db):
"""gets the scene ID from the scene db"""
conn = connection.create_connection(current_scene_db)
cur = conn.cursor()
    query = 'SELECT ID FROM "SceneMetadata"'
    cur.execute(query)
id = str(cur.fetchone()[0])
conn.close()
return id | 95c7a6ab2ef9484205fab57540ac71da22734428 | 19,318 |
def compile_playable_podcast1(playable_podcast1):
"""
    @param playable_podcast1: list containing dicts of key/value pairs for playable podcasts
"""
items = []
for podcast in playable_podcast1:
items.append({
'label': podcast['title'],
'thumbnail': podcast['thumbnail'],
'path': podcast['url'],
# 'info': podcast['desc'],
'is_playable': True,
})
return items | faf5e03aa4472a20564df9b0872ce58ffa8bce79 | 19,319 |
import math
def mouseclick(pos):
"""
Define "mouse click" event handler; implements game
"state" logic. It receives a parameter; pair of screen
coordinates, i.e. a tuple of two non-negative integers
- the position of the mouse click.
"""
# User clicks on a "card" of the "deck" (grid of
# evenly distributed cells - cards placeholders).
# Compute the index of this "card", i.e. determine
# which card have been clicked on with the mouse.
# Recall that the sequence of cards entirely fills
# the "canvas".
clicked_card_index = int(math.floor(float(pos[0]) / CARD_PLACEHOLDER_WIDTH))
# If user clicks on a card already "exposed"; ignore
# event and "return" function immediately.
if deck_of_cards_exposed[clicked_card_index]:
return None
# The counter of "turns" playing the game will be
# updated as a global variable.
global turn
# The following block implements the game logic for
# selecting two "cards" and determining if they match.
# State 0 corresponds to the start of the game.
# In state 0, if you click on a card, that card is
# exposed, and you switch to state 1.
# State 1 corresponds to a single exposed unpaired
# card.
# In state 1, if you click on an unexposed card, that
# card is exposed and you switch to state 2.
# State 2 corresponds to the end of a turn.
# In state 2, if you click on an unexposed card, that
# card is exposed and you switch to state 1.
global state
if state == 0:
# Set the "status" of the clicked "card"
# as "exposed".
deck_of_cards_exposed[clicked_card_index] = True
# Store the "index" of the "exposed" card.
# This is the first card "exposed" in this "turn"
# of the game.
index_of_cards_exposed_in_a_turn[0] = clicked_card_index
# Update "turn" counter; incremented after the
# first "card" is flipped during a turn.
turn += 1
label.set_text("Turns = " + str(turn))
# Switch to the next game "state".
state = 1
elif state == 1:
# Set the "status" of the clicked "card"
# as "exposed".
deck_of_cards_exposed[clicked_card_index] = True
# Store the "index" of the "exposed" card.
# This is the second card "exposed" in this "turn"
# of the game.
index_of_cards_exposed_in_a_turn[1] = clicked_card_index
# Switch to the next game "state".
state = 2
else:
# Set the "status" of the clicked "card"
# as "exposed".
deck_of_cards_exposed[clicked_card_index] = True
# Get the value of the cards exposed in the previous
# "turn" of the game (taking advantage of the
# "indexes" stored). Then determine if the previous
# two "exposed" cards are paired or unpaired.
# If unpaired then switch the "status" of these
# cards back to "unexposed"; i.e. flip them back
# over so that they are hidden before moving to
# state 1.
if deck_of_cards[index_of_cards_exposed_in_a_turn[0]] != deck_of_cards[index_of_cards_exposed_in_a_turn[1]]:
deck_of_cards_exposed[index_of_cards_exposed_in_a_turn[0]] = False
deck_of_cards_exposed[index_of_cards_exposed_in_a_turn[1]] = False
# Store the "index" of the "exposed" card.
# This is the first card "exposed" in this "turn"
# of the game, i.e. replace the "index" of the
# first card "exposed" in the previous "turn" of
# the game.
index_of_cards_exposed_in_a_turn[0] = clicked_card_index
# Update "turn" counter; incremented after the
# first "card" is flipped during a turn.
turn += 1
label.set_text("Turns = " + str(turn))
# Switch to the next game "state".
state = 1
return None | d283e9f9dd8581cf3d1daf997ff72af01e8d32cc | 19,320 |
def fixture_version_obj(bundle_data: dict, store: Store) -> models.Version:
"""Return a version object"""
return store.add_bundle(bundle_data)[1] | 4088df55621dc5cf43aefca07e8f34b8bfdf5e42 | 19,322 |
def ranges_compute(x, n_bins):
"""Computation of the ranges (borders of the bins).
Parameters
----------
x: pd.DataFrame
the data variable we want to obtain its distribution.
n_bins: int
the number of bins we want to use to plot the distribution.
Returns
-------
ranges: np.ndarray
the borders of the bins.
"""
mini = np.nanmin(np.array(x))
maxi = np.nanmax(np.array(x))
ranges = np.linspace(mini, maxi, n_bins+1)
return ranges | 28c0fcb61bc5a6417430e7b44972ba9e57a3914c | 19,323 |
def creation(basis_size: int, state_index: int) -> spr.csc_matrix:
"""
Generates the matrix of the fermionic creation operator for a given single particle state
:param basis_size: The total number of states in the single particle basis
:param state_index: The index of the state to be created by the operator
:return: The matrix of the many-body creation operator (2^basis_size x 2^basis_size sparse matrix)
"""
many_particle_basis_size = 2**basis_size
temp_matrix = spr.dok_matrix((many_particle_basis_size, many_particle_basis_size))
single_particle_state_mask = 1 << (state_index-1)
for state_to_act_on in range(many_particle_basis_size):
if ~state_to_act_on & single_particle_state_mask:
temp_matrix[state_to_act_on | single_particle_state_mask, state_to_act_on] = (
_anticommutation_factor(state_to_act_on, basis_size, state_index)
)
return temp_matrix.tocsc() | 7967aa861a1366a4187a70dad31c32625410fd3d | 19,324 |
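A hedged sketch of the operator above; it assumes scipy.sparse is imported as spr (per the annotations) and that _anticommutation_factor is defined in the same module.

c1_dag = creation(basis_size=2, state_index=1)
print(c1_dag.shape)  # (4, 4) -- the many-body basis has 2**2 states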
def LowercaseMutator(current, value):
"""Lower the value."""
return current.lower() | 7fed0dc4533948c54b64f649e2c85dca27ee9bc5 | 19,325 |
import collections
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, verbose_logging, logger):
"""Write final predictions to the json file."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit", "rank_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
rank_logit=result.rank_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit + x.rank_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit", "rank_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging, logger)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit,
rank_logit=pred.rank_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, rank_logit=0.))
assert len(nbest) >= 1
span_scores = []
rank_scores = []
for entry in nbest:
span_scores.append(entry.start_logit + entry.end_logit)
rank_scores.append(entry.rank_logit)
final_scores = [span_score + rank_score for span_score, rank_score
in zip(span_scores, rank_scores)]
nbest_indexes = np.argsort(final_scores)[::-1]
nbest_json = []
for index in nbest_indexes:
entry = nbest[index]
output = collections.OrderedDict()
output["text"] = entry.text
output["final_score"] = final_scores[index]
output["span_score"] = span_scores[index]
output["rank_score"] = rank_scores[index]
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json | a8ecc1eda5b7fc8b780f2d207e3d09d2b20f11ca | 19,327 |
def setup_view(view, request, *args, **kwargs):
"""Mimic ``as_view()``, but returns view instance.
Use this function to get view instances on which you can run unit tests,
by testing specific methods.
See https://stackoverflow.com/a/33647251 and
http://django-downloadview.readthedocs.io/en/latest/testing.html#django_downloadview.test.setup_view"""
view.request = request
view.args = args
view.kwargs = kwargs
return view | 156710acfdb383c23d844fe8de4f78a1190193d9 | 19,328 |
from textwrap import dedent
def get_density(molecule_name, temperature=273.15, pressure=101325,
cycles=5000, init_cycles="auto",
forcefield="CrystalGenerator"):
"""Calculates the density of a gas through an NPT ensemble.
Args:
molecule_name: The molecule to test for adsorption. A file of the same
name must exist in `$RASPA_DIR/share/raspa/molecules/TraPPE`.
temperature: (Optional) The temperature of the simulation, in Kelvin.
pressure: (Optional) The pressure of the simulation, in Pascals.
cycles: (Optional) The number of simulation cycles to run.
init_cycles: (Optional) The number of initialization cycles to run.
Defaults to the minimum of cycles / 2 and 10,000.
forcefield: (Optional) The forcefield to use. Name must match a folder
in `$RASPA_DIR/share/raspa/forcefield`, which contains the properly
named `.def` files.
Returns:
The density, as a float, in kg/m^3.
"""
print_every = cycles // 10
if init_cycles == "auto":
init_cycles = min(cycles // 2, 10000)
script = dedent("""
SimulationType {simulation_type}
NumberOfCycles {cycles}
NumberOfInitializationCycles {init_cycles}
PrintEvery {print_every}
Forcefield {forcefield}
Box 0
BoxLengths 30 30 30
ExternalTemperature {temperature}
ExternalPressure {pressure}
VolumeChangeProbability 0.25
Component 0 MoleculeName {molecule_name}
MoleculeDefinition TraPPE
TranslationProbability 0.5
ReinsertionProbability 0.5
CreateNumberOfMolecules 256
""".format(**locals())).strip()
output = parse(run_script(script))
return output["Average Density"]["[kg/m^3]"][0] | c36e236c98d3f99a19979701c287e740543c909f | 19,329 |
def is_node_up(config, host):
"""
    Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool
    and not the return code. There could be a normal return code of zero when the node is in an unhealthy state and not
    accepting requests.
    :param config: Configuration object; config.checks.health_check selects the check. Supported values are cql,
        thrift, and all. The latter will perform both checks. Defaults to cql.
    :param host: The target host on which to perform the check
    :return: True if the node is accepting requests, False otherwise. If both cql and thrift are checked, then the node
        must be ready to accept requests for both in order for the health check to be successful.
"""
health_check = config.checks.health_check
if int(config.cassandra.is_ccm) == 1:
args = ['ccm', 'node1', 'nodetool']
if health_check == 'thrift':
return is_ccm_up(args, 'statusthrift')
elif health_check == 'all':
return is_ccm_up(list(args), 'statusbinary') and is_ccm_up(list(args), 'statusthrift')
else:
return is_ccm_up(args, 'statusbinary')
else:
cassandra = Cassandra(config.cassandra)
native_port = cassandra.native_port
rpc_port = cassandra.rpc_port
nc_timeout = 10
args = ['timeout', str(nc_timeout), 'nc', '-zv', host]
if health_check == 'thrift':
return is_cassandra_up(args, rpc_port)
elif health_check == 'all':
return is_cassandra_up(list(args), rpc_port) and is_cassandra_up(list(args), native_port)
else:
# cql only
return is_cassandra_up(args, native_port) | 4a80198ea8fede538f78f700f7d451dd65e8f435 | 19,330 |
def audits(program):
"""Create 2 audits mapped to the program"""
return [rest_facade.create_audit(program) for _ in xrange(2)] | 2fc7392c53cc0726dcba38b8eb44475fa5cb9ad8 | 19,331 |
def _create_run(uri, experiment_id, work_dir, entry_point):
"""
Create a ``Run`` against the current MLflow tracking server, logging metadata (e.g. the URI,
entry point, and parameters of the project) about the run. Return an ``ActiveRun`` that can be
used to report additional data about the run (metrics/params) to the tracking server.
"""
if _is_local_uri(uri):
source_name = tracking.utils._get_git_url_if_present(_expand_uri(uri))
else:
source_name = _expand_uri(uri)
existing_run = fluent.active_run()
if existing_run:
parent_run_id = existing_run.info.run_uuid
else:
parent_run_id = None
active_run = tracking.MlflowClient().create_run(
experiment_id=experiment_id,
source_name=source_name,
source_version=_get_git_commit(work_dir),
entry_point_name=entry_point,
source_type=SourceType.PROJECT,
parent_run_id=parent_run_id)
return active_run | 1ae9d084927423d88e0a737098b2567f940fbe11 | 19,332 |
def convertBoard(board):
"""
converts board into numerical representation
"""
flatBoard = np.zeros(64)
for i in range(64):
val = board.piece_at(i)
if val is None:
flatBoard[i] = 0
else:
flatBoard[i] = {"P": 1, "N" : 2, "B" : 3, "R" : 4, "Q" : 5, "K" : 6, "p" : 7, "n" : 8, "b" : 9, "r" : 10, "q" : 11, "k" : 12}[val.symbol()]
# return flatBoard
return flatBoard.reshape(64, 1, 1) | 2ad05a0b1561b38332cb3b92686f903c8bd0f260 | 19,333 |
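A hedged usage sketch assuming python-chess (imported as chess) provides the Board and numpy is available as np in the module.

import chess

board = chess.Board()  # standard starting position
encoded = convertBoard(board)
print(encoded.shape)      # (64, 1, 1)
print(encoded[0, 0, 0])   # 4.0 -> the white rook on a1 under this encoding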
from typing import Dict
from typing import Any
from typing import MutableMapping
import logging
def merge_optional(default_dict: Dict[str, Any], update_dict: Dict[str, Any], tpe: str):
"""
Function to merge dictionaries to add set parameters from update dictionary into default dictionary.
    @param default_dict: Default configuration dictionary.
@type default_dict: dict
@param update_dict: Update configuration to be merged into default configurations.
@type update_dict: dict
@param tpe: String representation of type of learner.
@type tpe: str
@return: Result of merged dictionaries.
@rtype: dict
"""
default_copy = default_dict.copy()
for k, v in default_copy.items(): # pylint: disable=invalid-name
if k in update_dict:
if all(isinstance(e, MutableMapping) for e in (v, update_dict[k])):
update_dict[k] = merge_optional(v, update_dict[k], tpe)
else:
logging.warning(f"Gotten unknown alternative mapping {k}:{v} for {tpe}")
# Base case
update = list(filter(lambda item: item[1] is not None, update_dict.items()))
for k, v in update: # pylint: disable=invalid-name
if not isinstance(v, dict):
logging.info(f'Updating {k} from {default_copy[k]} to {v} for {tpe}')
default_copy[k] = v
return default_copy | 32e52e58604b01061b6c5a3122287c0e5d8a9a84 | 19,334 |
def reference_pixel_map(dimensions, instrument_name):
"""Create a map that flags all reference pixels as such
Parameters
----------
dimensions : tup
(y, x) dimensions, in pixels, of the map to create
instrument_name : str
Name of JWST instrument associated with the data
Returns
-------
ref_map : numpy.ndarray
2D map showing the locations of reference pixels (1)
"""
yd, xd = dimensions
    ref_map = np.zeros(dimensions).astype(int)  # np.int was removed in recent NumPy
ref_map[:, 0:4] = 1
ref_map[:, xd-4:xd] = 1
if instrument_name.lower() != 'miri':
ref_map[0:4, :] = 1
ref_map[yd-4:yd, :] = 1
return ref_map | 6800971d43ba481f3a62fce31eac21d93a36d42b | 19,335 |
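A quick check of the map layout (numpy imported as np, as in the module): for a 10x10 array of any non-MIRI instrument, only the inner 2x2 block is left unflagged.

ref = reference_pixel_map((10, 10), "nircam")
print(int(ref.sum()))  # 96 reference pixels out of 100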
import json
def geocode(value, spatial_keyword_type='hostname'):
"""convenience function to geocode a value"""
lat, lon = 0.0, 0.0
if spatial_keyword_type == 'hostname':
try:
hostname = urlparse(value).hostname
url = 'http://ip-api.com/json/%s' % hostname
LOGGER.info('Geocoding %s with %s', hostname, url)
content = json.loads(urlopen(url).read())
lat, lon = content['lat'], content['lon']
except Exception as err: # skip storage
msg = 'Could not derive coordinates: %s' % err
LOGGER.exception(msg)
return lat, lon | a2f94854a0881e5cb9041d0b134b0c4f811b132d | 19,336 |
def deploy_contract(w3, document_page_url, secret_key):
"""
    Deploy the contract
:param w3: the w3 connection
:param document_page_url: the document page url
:param secret_key: the operator secret key
    :return: the transaction receipt
"""
# 1. declare contract
document_sc = w3.eth.contract(abi=abi, bytecode=bytecode)
# 2. authenticate operator
gas_required = settings.GAS_CONTRACT_DEPLOY
gas_price = get_gas_price(w3)
acct = w3.eth.account.from_key(secret_key)
check_balance(w3, acct.address, minimum_required=(gas_required * gas_price))
# 3. create the constructor transaction
construct_txn = document_sc.constructor(document_page_url).buildTransaction({
'from': acct.address,
'nonce': w3.eth.get_transaction_count(acct.address),
'gas': gas_required,
'gasPrice': gas_price})
# 4. sign transaction
signed = acct.sign_transaction(construct_txn)
# 5. send signed transaction
tx_hash = w3.eth.send_raw_transaction(signed.rawTransaction)
return w3.eth.wait_for_transaction_receipt(tx_hash) | f295d263aba4929d84897da4dfc6af08fab7e808 | 19,337 |
from datetime import datetime
def format_date(format_string=None, datetime_obj=None):
"""
Format a datetime object with Java SimpleDateFormat's-like string.
If datetime_obj is not given - use current datetime.
If format_string is not given - return number of millisecond since epoch.
:param format_string:
:param datetime_obj:
:return:
:rtype string
"""
datetime_obj = datetime_obj or datetime.now()
if format_string is None:
seconds = int(datetime_obj.strftime("%s"))
milliseconds = datetime_obj.microsecond // 1000
return str(seconds * 1000 + milliseconds)
else:
formatter = SimpleDateFormat(format_string)
return formatter.format_datetime(datetime_obj) | ea72af8ad5d3ba1999d9c05cd87203d59651bcc3 | 19,338 |
import pathlib
def testdata(request):
"""
If expected data is required for a test this fixture returns the path
    to a folder with name '.testdata' located in the same directory as the
calling test module
"""
testdata_dir = '.testdata'
module_dir = pathlib.Path(request.fspath).parent
return module_dir / testdata_dir | 5d9a440b178aca00635f567420aaa9c406a1d7d2 | 19,339 |
def set_price(location, algo, order, price):
"""
https://api.nicehash.com/api?method=orders.set.price&id=8&key=3583b1df-5e93-4ba0-96d7-7d621fe15a17&location=0&algo=0&order=1881&price=2.1
:param location:
:param algo:
:param order:
:param price:
:return:
"""
resp = query('orders.set.price', {'location': location, 'algo': algo, 'order': order, 'price': price})
ret = resp.json()
return ret['result'] | d8b90329c57e26a2a26f4aa134c66b9e1fd8b2c9 | 19,340 |
def jaccard_coef_loss(y_true, y_pred):
"""
Loss based on the jaccard coefficient, regularised with
binary crossentropy
Notes
-----
Found in https://github.com/ternaus/kaggle_dstl_submission
"""
return (-K.log(jaccard_coef(y_true, y_pred)) +
K.binary_crossentropy(y_pred, y_true)) | 4c3d1def3ce650e9f7cd87e1a5d645d3a022f7a6 | 19,341 |
def safe_str(obj):
""" return the byte string representation of obj """
try:
return str(obj)
except UnicodeEncodeError:
# obj is unicode
return unicode(obj).encode('unicode_escape') | 74d3c75b03f38b0b151ff135a6a2ab3738822629 | 19,344 |
def default_user_agent():
"""Return a string representing the default user agent."""
return f'airslate/{__version__} ({__url__})' | 3de3e0adcf766a00b89479d8e8b00369a3f884a8 | 19,345 |
def join_complementary_byteblocks(block) -> int:
"""
    join_complementary_byteblocks combines a low byte and a high byte into a signed
    integer, interpreting the joined value as two's complement
Parameters
----------
block : list
Low Digit Block -> int
High Digit Block -> int
Returns
-------
parsed : int
        low | high << 8 ... interpreted as two's complement
Example:
        LowDigitBlock = 0     # 0x00
        HighDigitBlock = 255  # 0xff
block = [LowDigitBlock, HighDigitBlock] # low first
join_complementary_byteblocks(block)
-> -256 # -0x100
"""
n_byte = len(block)
sign_bound = 2 ** (n_byte * 8 - 1)
sign_block = 2 ** (n_byte * 8)
parsed = join_byteblocks(block)
if parsed < sign_bound:
return parsed
else:
return parsed - sign_block | 0e86b132663f0d0517db9188a91443d698b74889 | 19,346 |
import numpy
def seed(func):
""" Decorator to seed the RNG before any function. """
@wraps(func)
def wrapper(*args, **kwargs):
numpy.random.seed(0)
return func(*args, **kwargs)
return wrapper | 3f1f563213d2e9175928e2f7fb5ddc90367ea0a6 | 19,347 |
def trim_bandstructure(
energy_cutoff: float, band_structure: BandStructure
) -> BandStructure:
"""
Trim the number of bands in a band structure object based on a cutoff.
Args:
energy_cutoff: An energy cutoff within which to keep the bands. If the system
is metallic then the bands to keep will fall within +/- the cutoff around
the Fermi level. If the system has a band gap, the bands from the VBM -
energy_cutoff to CBM + energy_cutoff will be kept.
band_structure: A band structure.
Returns:
A trimmed band structure.
"""
if band_structure.is_metal():
min_e = band_structure.efermi - energy_cutoff
max_e = band_structure.efermi + energy_cutoff
else:
min_e = band_structure.get_vbm()["energy"] - energy_cutoff
max_e = band_structure.get_cbm()["energy"] + energy_cutoff
new_bands = {}
for spin, bands in band_structure.bands.items():
ibands = np.any((bands > min_e) & (bands < max_e), axis=1)
new_bands[spin] = bands[ibands]
return BandStructure(
np.array([k.frac_coords for k in band_structure.kpoints]),
new_bands,
lattice=band_structure.lattice_rec,
efermi=band_structure.efermi,
coords_are_cartesian=False,
structure=band_structure.structure,
) | 3f3a0ead00657d4ceb32a5303823c7d9c538222f | 19,348 |
def _run_command(c: InvokeContext, cmd: str) -> CommandResult:
"""
Command runner.
:argument c: InvokeContext
:argument cmd: str the command to run
"""
try:
result = c.run(cmd)
return CommandResult(
exit_code=result.exited,
message=result.stdout,
command=cmd
)
except UnexpectedExit as e:
raise NonZeroExitException(
exit_code=e.result.exited,
message=e.result.stderr,
command=cmd
) | 9b6e90f894099c1534b63e6171863a5430be3146 | 19,350 |
def get_proto_messages(protocol: str) -> list:
""" Get messages of a protocol. """
db = MetaDB()
rows = db.get_all_meta()
db.close_conn()
    messages = list(set(row[1] for row in rows if row[0] == protocol))  # list matches the annotated return type
return messages | 586b7f32b7ef1e713cacd31bdbb36d8f514e882d | 19,351 |
def TokenAMarkdownReference(href, reference, title=None):
"""
[link text][1]
[1]: <https://example.com> "Title"
<a href="https://example.com" title="Title">link text</a>
"""
title = ' "%s"' % title if title else ""
data = "[%s]: %s%s" % (reference, href, title)
token = {
"type": "Characters",
"data": data,
"_md_type": mdTokenTypes["TokenAMarkdownReference"],
}
return token | 48077864db045b841efa517a0136cf56a5d9ec8b | 19,352 |
import math
def inverse_document_frequency(word_occurrence, num_texts):
"""Takes in a word (string) and texts (list of lists and calculates the
number of texts over number of texts where the word occurs"""
try:
IDF = float(num_texts) / float(word_occurrence)
return math.log(IDF)
except ZeroDivisionError:
return 0 | a64e84b3c7d378e61765a84d3f4405e77b2ffd40 | 19,353 |
def bip32_mprv_from_seed(seed: octets, version: octets) -> bytes:
"""derive the master extended private key from the seed"""
if isinstance(version, str): # hex string
version = bytes.fromhex(version)
assert version in PRIVATE, "wrong version, master key must be private"
# serialization data
xmprv = version # version
xmprv += b'\x00' # depth
xmprv += b'\x00\x00\x00\x00' # parent pubkey fingerprint
xmprv += b'\x00\x00\x00\x00' # child index
# actual extended key (key + chain code) derivation
if isinstance(seed, str): # hex string
seed = bytes.fromhex(seed)
hd = HMAC(b"Bitcoin seed", seed, sha512).digest()
mprv = octets2int(hd[:32])
xmprv += hd[32:] # chain code
xmprv += b'\x00' + mprv.to_bytes(32, 'big') # private key
return b58encode_check(xmprv) | 2837bb9accc050e2defa3f38781d17d6f4917193 | 19,354 |
def create_menu_node(context: WxRenderingContext) -> WxNode:
"""Creates node from xml node using namespace as module and tag name as class name"""
inst_type = get_type(context.xml_node)
args = get_attr_args(context.xml_node, 'init', context.node_globals)
inst = inst_type(**args)
return WxNode(inst, context.xml_node, node_globals=context.node_globals) | 3b3a215b917c980eb273f19f9f6dc9587df6874f | 19,355 |
def attach_calibration_pattern(ax, **calibration_pattern_kwargs):
"""Attach a calibration pattern to axes.
This function uses calibration_pattern to generate a figure.
Args:
calibration_pattern_kwargs: kwargs, optional
Parameters to be given to the calibration_pattern function.
Returns:
image_axes: matplotlib.AxesImage
See matplotlib.imshow documentation
Useful for changing the image dynamically
circle_artist: matplotlib.artist
See matplotlib.circle documentation
Useful for removing the circle from the figure
"""
pattern, flow = calibration_pattern(**calibration_pattern_kwargs)
flow_max_radius = calibration_pattern_kwargs.get("flow_max_radius", 1)
extent = (-flow_max_radius, flow_max_radius) * 2
image = ax.imshow(pattern, extent=extent)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
for spine in ("bottom", "left"):
ax.spines[spine].set_position("zero")
ax.spines[spine].set_linewidth(1)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
attach_coord(ax, flow, extent=extent)
circle = plt.Circle((0, 0), flow_max_radius, fill=False, lw=1)
ax.add_artist(circle)
return image, circle | 935c3f9d3651162963c10580fdcfe94e11627a0f | 19,356 |
import base64
def alignplot(align_data, en_tokens = None, es_tokens = None, annot = False):
"""
    plot the align data with tokens in both languages
    :param annot: whether to annotate each element in the matrix
    :param align_data: attention matrix, array-like
    :param en_tokens: english tokens (list, array)
    :param es_tokens: spanish tokens (list, array)
"""
align_data_shape = align_data.shape
if en_tokens is not None and es_tokens is not None:
if annot:
fig = plt.figure(figsize = (align_data_shape[0]/3,align_data_shape[1]/3))
sns.heatmap(align_data, cmap = "Reds", annot=annot, fmt=".1f", cbar = True, linewidths=.5, linecolor='gray', xticklabels = en_tokens, yticklabels = es_tokens)
else:
fig = plt.figure()
sns.heatmap(align_data, cmap = "Reds", annot=annot, fmt=".1f", cbar = True, linewidths=.5, xticklabels = en_tokens, yticklabels = es_tokens)
plt.xticks(rotation=45)
image = BytesIO()
fig.tight_layout()
fig.savefig(image, format='jpeg')
return base64.b64encode(image.getvalue()).decode('utf-8').replace('\n', '') | 52cf41491e70c8340a40fb16a6fdd4ffbcd68345 | 19,357 |
from datetime import datetime
def to_datetime(timestamp: str) -> datetime.datetime:
"""Converts a timestamp string in ISO format into a datatime object.
Parameters
----------
timstamp : string
Timestamp in ISO format
Returns
-------
datetime.datetime
Datetime object
"""
# Assumes a string in ISO format (with or without milliseconds)
for format in ['%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S']:
try:
return datetime.datetime.strptime(timestamp, format)
except ValueError:
pass
return isoparse(timestamp) | 16a2a1ac36e2d1988ddf41ec05fbc59a9a596dff | 19,359 |
from typing import Any
from typing import Union
from typing import Callable
async def handle_arg(
ctx: SlashContext,
key: str,
value: Any,
type_: Union[Callable, commands.Converter]
) -> Any:
"""
Handle an argument and deal with typing.Optional modifiers
Parameters
----------
ctx : SlashContext
The context of the argument
key : str
The argument name
value : any
The value of the argument
type_ : callable or commands.Converter
The type, instance of type converter or type converter class
Returns
-------
any
The handled argument
Raises
------
BadSlashArgument
Invalid argument, this occurs when no converter is able to provide a
non-None response or not raise an error while converting. If a
typing.Optional is provided, or any object with .__args__ containing
NoneType, then this method will never raise and will instead return
None.
"""
# Is a typing.Optional, typing.Union, etc class
if hasattr(type_, "__args__"):
optional = type(None) in type_.__args__
for item in type_.__args__: # Iterate through possible types
if item is None:
# Don't try to convert with None
continue
try:
# Attempt to convert, this also allows nesting of
# typing.Optional etc
new_value = await handle_arg(ctx, key, value, item)
# Return by default if it's good (should go left to right)
return new_value
except Exception as exc:
# This is optional, so we can skip past it
pass
if not optional:
raise BadSlashArgument(message=f"Argument {key} is not of any valid type")
return None
else:
if hasattr(type_, "convert"):
# Check item is instantiated
if isinstance(type_, type):
                # Instantiate the class first
type_converter = type_().convert
else:
# Grab the function of the init'd converter
type_converter = type_.convert
try:
return await type_converter(ctx, value)
except Exception as exc:
raise BadSlashArgument(f"Failed to convert argument {key}") from exc
else:
# Probably not a converter
return type_(value) | 21808304b4d194cfd13d705469babd7ec8c20c20 | 19,360 |
def get_module_source_path(modname, basename=None):
"""Return module *modname* source path
If *basename* is specified, return *modname.basename* path where
*modname* is a package containing the module *basename*
*basename* is a filename (not a module name), so it must include the
file extension: .py or .pyw
Handles py2exe/cx_Freeze distributions"""
srcpath = get_module_path(modname)
parentdir = osp.join(srcpath, osp.pardir)
if osp.isfile(parentdir):
# Parent directory is not a directory but the 'library.zip' file:
# this is either a py2exe or a cx_Freeze distribution
srcpath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir),
modname))
if basename is not None:
srcpath = osp.abspath(osp.join(srcpath, basename))
return srcpath | b212713a53a14deb0fcd1a1518d2665bef03091f | 19,362 |
def drop_useless_columns(data):
"""Drop the columns containing duplicate or useless columns."""
data = data.drop(
labels=[
# we stay in a given city
"agency_id",
"agency_name",
"agency_short_name",
# we stay on a given transportation network
"transportation_type",
"transportation_subtype",
# we already have stop id
"stop_name_unofficial",
# we already have line name
"line_id",
# we don't need this
"circuit_transfer",
],
axis=1,
)
return data | 7a47625a5df7e9fa66cefe2f326af3f0b9f59b79 | 19,363 |
from typing import Any
from typing import Optional
def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
"""Converts an XSOAR argument to a timestamp (seconds from epoch)
This function is used to quickly validate an argument provided to XSOAR
via ``demisto.args()`` into an ``int`` containing a timestamp (seconds
since epoch). It will throw a ValueError if the input is invalid.
If the input is None, it will throw a ValueError if required is ``True``,
    or ``None`` if required is ``False``.
:type arg: ``Any``
:param arg: argument to convert
:type arg_name: ``str``
:param arg_name: argument name
:type required: ``bool``
:param required:
throws exception if ``True`` and argument provided is None
:return:
returns an ``int`` containing a timestamp (seconds from epoch) if conversion works
returns ``None`` if arg is ``None`` and required is set to ``False``
otherwise throws an Exception
:rtype: ``Optional[int]``
"""
if arg is None:
if required is True:
raise ValueError(f'Missing "{arg_name}"')
return None
if isinstance(arg, str) and arg.isdigit():
# timestamp is a str containing digits - we just convert it to int
return int(arg)
if isinstance(arg, str):
# we use dateparser to handle strings either in ISO8601 format, or
# relative time stamps.
# For example: format 2019-10-23T00:00:00 or "3 days", etc
date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
if date is None:
# if d is None it means dateparser failed to parse it
raise ValueError(f'Invalid date: {arg_name}')
return int(date.timestamp())
if isinstance(arg, (int, float)):
# Convert to int if the input is a float
return int(arg)
raise ValueError(f'Invalid date: "{arg_name}"') | 00fa81b041746be0e3f2e6a3c0545b72ab86fe3b | 19,364 |
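Hedged examples of the branches above, assuming dateparser is importable (it is used but not imported in the snippet itself).

print(arg_to_timestamp("1609459200", "since"))           # 1609459200 (digit strings pass through)
print(arg_to_timestamp(None, "since", required=False))   # None
# arg_to_timestamp("3 days", "since") would be parsed by dateparser relative to now (UTC).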
import math
def expfloats (floats):
"""Manipulates floats so that their tiles are logarithmic sizes
large to small"""
return [math.exp(i) for i in floats] | 3943b8f8eedd4195693e0bace2223819f3728bb2 | 19,365 |
def typeChecker(obj, *types, error=True):
"""
Check type(s) of an object.
The first type correlates to the first layer of obj and so on.
Each type can be a (tuple that holds) type, string or literal object such as `None`.
:param obj: Generic obj, iterable or not
:param types: lists or tuples if obj at that level can be multiple types, single type if only one
:param error: Raise error if true, otherwise returns False when fail
:return:
"""
literalObjects = [None]
try:
if not types:
raise TypeError("No types were given as args")
types = _typeChecker_prepareTypesList(types, literalObjects)
_typeChecker_checkObject(obj, types, literalObjects)
except TypeError as e:
if error:
raise e
else:
return False
else:
return True | 8ced14861295dd29bcbe2558d18a77d616cfec91 | 19,366 |
def post_profile_identifier_chunks_token(identifier, token):
"""
Updates a public chunk.
"""
chunk = models.Chunk.get_by_token(identifier, token)
if not chunk:
raise errors.ResourceNotFound('That chunk does not exist')
stream_entity = chunk.key.parent().get()
others = filter(lambda p: p.account != chunk.sender, stream_entity.participants)
if len(others) != 1:
raise errors.ForbiddenAction('Cannot modify that chunk')
stream = streams.MutableStream(others[0], stream_entity)
# Update played state on behalf of the receiver.
if flask_extras.get_flag('played'):
was_unplayed = not stream.is_played
stream.set_played_until(chunk.end, report=False)
stream._report('played', duration=chunk.duration / 1000.0, unplayed=was_unplayed)
return {'success': True} | 7a285df6eb2d41fc28d73b3c5da0621037808735 | 19,367 |
import logging
def get_latest_checkpoint(checkpoint_dir: str) -> int:
"""Find the episode ID of the latest checkpoint, if any."""
glob = osp.join(checkpoint_dir, 'checkpoint_*.pkl')
def extract_episode(x):
return int(x[x.rfind('checkpoint_') + 11:-4])
try:
checkpoint_files = tf.io.gfile.glob(glob)
except tf.errors.NotFoundError:
logging.warning('Unable to reload checkpoint at %s', checkpoint_dir)
return -1
try:
latest_episode = max(extract_episode(x) for x in checkpoint_files)
except ValueError:
return -1
return latest_episode | 13677352d0d942bbc5348003ee449e27ea9f0371 | 19,368 |
def lazy(f):
"""A decorator to simply yield the result of a function"""
@wraps(f)
def lazyfunc(*args):
yield f(*args)
return lazyfunc | 5b44d01e0b982a8c36e8ce6aab2f9590e96509e6 | 19,369 |
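# Hedged usage sketch for lazy (not part of the original snippet): the
# decorated call returns a one-item generator, so the wrapped function body
# only runs once the generator is advanced.
@lazy
def add(a, b):
    return a + b

result_gen = add(2, 3)   # the body of add has not run yet
print(next(result_gen))  # -> 5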
def build_source_test_raw_sql(test_namespace, source, table, test_type,
test_args):
"""Build the raw SQL from a source test definition.
:param test_namespace: The test's namespace, if one exists
:param source: The source under test.
:param table: The table under test
:param test_type: The type of the test (unique_id, etc)
:param test_args: The arguments passed to the test as a list of `key=value`
strings
:return: A string of raw sql for the test node.
"""
# sort the dict so the keys are rendered deterministically (for tests)
kwargs = [as_kwarg(key, test_args[key]) for key in sorted(test_args)]
if test_namespace is None:
macro_name = "test_{}".format(test_type)
else:
macro_name = "{}.test_{}".format(test_namespace, test_type)
raw_sql = (
"{{{{ {macro}(model=source('{source}', '{table}'), {kwargs}) }}}}"
.format(
source=source['name'],
table=table['name'],
macro=macro_name,
kwargs=", ".join(kwargs))
)
return raw_sql | 2603081c3e36b9cdb3ca4e0c2381cbb5f34b3df4 | 19,370 |
from typing import List
from typing import Tuple
from typing import Optional
from random import randint
def get_random_word(used_words: List[int]) -> Tuple[Optional[str], List[int]]:
"""Select a random word from a list and pass on a list of used words.
Args:
used_words (list): A list of the indexes of every already used word.
Returns:
Tuple[Optional[str], list]: The random word that is selected and a list
of the index of every random word that has been selected.
"""
list_of_words: List[str] = [
"hangman", "kanban", "evidence", "problem", "decomposed", "components",
"developed", "trialled", "assembled", "tested", "create", "final",
"working", "outcome"
]
if len(used_words) == len(list_of_words):
        # If len(used_words) == len(list_of_words), the copy-and-pop code
        # below would just delete every word from list_of_words, so we save
        # the computation (and another if statement checking whether
        # list_of_words_without_used_words is empty) and just return None as
        # the word, signalling to the caller that the word list is empty.
return None, used_words
list_of_words_without_used_words: List[str] = list_of_words.copy()
for i in sorted(used_words, reverse=True):
        # used_words is looped through in reverse so that popping the list
        # item at index 'i' never shifts an index that still has to be
        # popped: any higher index has already been removed.
list_of_words_without_used_words.pop(i)
    # len(list_of_words_without_used_words) - 1 because randint is inclusive
    # of both endpoints and list indexes start at 0.
random_number: int = randint(0, len(list_of_words_without_used_words) - 1)
word = list_of_words_without_used_words[random_number]
# because random_number picks a word from a popped version of
# list_of_words, we can't directly translate the index of the word in
# list_of_words_without_used_words to the index of the word in
# list_of_words, therefore, we have to use the list.index()
# function to find the index of the word in the full list.
used_words.append(list_of_words.index(word))
return word, used_words | bac4ebbf81cafef6fb615b8b4de7e74b848e794a | 19,371 |
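# Hedged usage sketch for get_random_word (not part of the original snippet):
# draw words until the list is exhausted; which word comes back first depends
# on the random draw.
word, used = get_random_word([])
while word is not None:          # keep drawing until every word has been used
    word, used = get_random_word(used)
print(sorted(used))              # -> [0, 1, ..., 13]: all 14 words were drawn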
import tensorflow as tf
def _generate_image_and_label_batch(image, label, min_queue_examples,
batch_size, height, shuffle, channels_last=True):
"""Construct a queued batch of images and labels.
Args:
image: 3-D Tensor of [height, width, 3] of type.float32.
label: 1-D Tensor of type.int32
min_queue_examples: int32, minimum number of samples to retain
in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    height: Integer image height, attached to the output feature dict.
    shuffle: boolean indicating whether to use a shuffling queue.
    channels_last: If False, images are transposed to channels-first layout.
Returns:
images: Images. 4D tensor of [batch_size, height, width, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
if not channels_last:
image = tf.transpose(image, [2, 0, 1])
features = {
'images': image,
'labels': tf.one_hot(label, 10),
'recons_image': image,
'recons_label': label,
}
if shuffle:
batched_features = tf.train.shuffle_batch(
features,
batch_size=batch_size,
num_threads=16,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
batched_features = tf.train.batch(
features,
batch_size=batch_size,
num_threads=1,
capacity=min_queue_examples + 3 * batch_size)
batched_features['labels'] = tf.reshape(batched_features['labels'],
[batch_size, 10])
batched_features['recons_label'] = tf.reshape(
batched_features['recons_label'], [batch_size])
batched_features['height'] = height
batched_features['depth'] = 3
batched_features['num_targets'] = 1
batched_features['num_classes'] = 10
# Display the training images in the visualizer.
tf.summary.image('images', batched_features['images'])
return batched_features | 262d9ab1cace40ceb08b5bd1d8a4dd33f7fc151d | 19,372 |
import numpy as np
from scipy import stats
def riskscoreci(x1, n1, x2, n2, alpha=0.05, correction=True):
"""Compute CI for the ratio of two binomial rates.
Implements the non-iterative method of Nam (1995).
It has better properties than Wald/Katz intervals,
especially with small samples and rare events.
Translated from R-package 'PropCIs':
https://github.com/shearer/PropCIs
Nam, J. M. (1995) Confidence limits for the ratio of two binomial proportions based on likelihood
scores: Non-iterative method. Biom. J. 37 (3), 375-379.
Koopman PAR. (1984) Confidence limits for the ratio of two binomial proportions. Biometrics 40,
513-517.
Miettinen OS, Nurminen M. (1985) Comparative analysis of two rates. Statistics in Medicine 4,
213-226.
Nurminen, M. (1986) Analysis of trends in proportions with an ordinally scaled determinant. Biometrical
J 28, 965-974
Agresti, A. (2002) Categorical Data Analysis. Wiley, 2nd Edition.
Parameters
----------
xi : int
Number of events in group i
ni : int
Number of trials/subjects in group i
alpha : float
Specifies coverage of the confidence interval
correction : bool
A corrected estimate of RR can be returned by adding 0.5 to each cell
of the contingency table.
Returns
-------
ci : array
Confidence interval array [LL, UL, RR_est]"""
if correction:
rr_est = ((x1+0.5) / (n1+1)) / ((x2+0.5) / (n2+1))
else:
rr_est = (x1 / n1) / (x2 / n2)
z = np.abs(stats.norm.ppf(alpha/2))
if x2==0 and x1 == 0:
ul = np.inf
ll = 0
else:
a1 = n2*(n2*(n2+n1)*x1+n1*(n2+x1)*(z**2))
a2 = -n2*(n2*n1*(x2+x1)+2*(n2+n1)*x2*x1+n1*(n2+x2+2*x1)*(z**2))
a3 = 2*n2*n1*x2*(x2+x1)+(n2+n1)*(x2**2)*x1+n2*n1*(x2+x1)*(z**2)
a4 = -n1*(x2**2)*(x2+x1)
b1 = a2/a1
b2 = a3/a1
b3 = a4/a1
c1 = b2-(b1**2)/3
c2 = b3-b1*b2/3+2*(b1**3)/27
ceta = np.arccos(np.sqrt(27)*c2/(2*c1*np.sqrt(-c1)))
t1 = -2*np.sqrt(-c1/3)*np.cos(np.pi/3-ceta/3)
t2 = -2*np.sqrt(-c1/3)*np.cos(np.pi/3+ceta/3)
t3 = 2*np.sqrt(-c1/3)*np.cos(ceta/3)
p01 = t1-b1/3
p02 = t2-b1/3
p03 = t3-b1/3
p0sum = p01+p02+p03
p0up = np.min([p01,p02,p03])
p0low = p0sum-p0up-np.max([p01,p02,p03])
if x2 == 0 and x1 != 0:
ll = (1-(n1-x1)*(1-p0low)/(x2+n1-(n2+n1)*p0low))/p0low
ul = np.inf
elif x2 != n2 and x1==0:
ul = (1-(n1-x1)*(1-p0up)/(x2+n1-(n2+n1)*p0up))/p0up
ll = 0
elif x2 == n2 and x1 == n1:
ul = (n2+z**2)/n2
ll = n1/(n1+z**2)
elif x1 == n1 or x2 == n2:
if x2 == n2 and x1 == 0:
ll = 0
if x2 == n2 and x1 != 0:
phat1 = x2/n2
phat2 = x1/n1
phihat = phat2/phat1
phil = 0.95*phihat
chi2 = 0
while chi2 <= z:
a = (n2+n1)*phil
b = -((x2+n1)*phil+x1+n2)
c = x2+x1
p1hat = (-b-np.sqrt(b**2-4*a*c))/(2*a)
p2hat = p1hat*phil
q2hat = 1-p2hat
var = (n2*n1*p2hat)/(n1*(phil-p2hat)+n2*q2hat)
chi2 = ((x1-n1*p2hat)/q2hat)/np.sqrt(var)
ll = phil
phil = ll/1.0001
i = x2
j = x1
ni = n2
nj = n1
if x1 == n1:
i = x1
j = x2
ni = n1
nj = n2
phat1 = i/ni
phat2 = j/nj
phihat = phat2/phat1
phiu = 1.1*phihat
if x2 == n2 and x1 == 0:
if n2<100:
phiu = .01
else:
phiu = 0.001
chi1 = 0
while chi1 >= -z:
a = (ni+nj)*phiu
b = -((i+nj)*phiu+j+ni)
c = i+j
p1hat = (-b-np.sqrt(b**2-4*a*c))/(2*a)
p2hat = p1hat*phiu
q2hat = 1-p2hat
var = (ni*nj*p2hat)/(nj*(phiu-p2hat)+ni*q2hat)
chi1 = ((j-nj*p2hat)/q2hat)/np.sqrt(var)
phiu1 = phiu
phiu = 1.0001*phiu1
if x1 == n1:
ul = (1-(n1-x1)*(1-p0up)/(x2+n1-(n2+n1)*p0up))/p0up
ll = 1/phiu1
else:
ul = phiu1
else:
ul = (1-(n1-x1)*(1-p0up)/(x2+n1-(n2+n1)*p0up))/p0up
ll = (1-(n1-x1)*(1-p0low)/(x2+n1-(n2+n1)*p0low))/p0low
return np.array([ll, ul, rr_est]) | f1a976b744d7fca5f3e2005da952808db0d618ad | 19,373 |
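# Hedged usage sketch for riskscoreci (not part of the original snippet):
# 10/100 events versus 5/100 events; the printed limits are approximate,
# illustrative values (roughly RR ~ 1.9 with a 95% CI near [0.7, 5.5]).
ll, ul, rr = riskscoreci(x1=10, n1=100, x2=5, n2=100)
print(f"RR ~ {rr:.2f}, 95% CI [{ll:.2f}, {ul:.2f}]")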
import skimage
def blend(a, b, alpha=0.5):
"""
Alpha blend two images.
Parameters
----------
a, b : numpy.ndarray
Images to blend.
alpha : float
Blending factor.
Returns
-------
result : numpy.ndarray
Blended image.
"""
a = skimage.img_as_float(a)
b = skimage.img_as_float(b)
return a*alpha+(1-alpha)*b | 2a3a6f68f48b7f6e3f5a51352082b40e63577955 | 19,374 |
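# Hedged usage sketch for blend (not part of the original snippet): mixing a
# dark and a bright image with a 25% weight on the first one.
import numpy as np
dark = np.zeros((4, 4))
bright = np.ones((4, 4))
print(blend(dark, bright, alpha=0.25)[0, 0])  # -> 0.75 (75% of the bright image)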
def GetWsdlMethod(ns, wsdlName):
""" Get wsdl method from ns, wsdlName """
with _lazyLock:
method = _wsdlMethodMap[(ns, wsdlName)]
if isinstance(method, ManagedMethod):
# The type corresponding to the method is loaded,
# just return the method object
return method
elif method:
# The type is not loaded, the map contains the info
# to load the type. Load the actual type and
# return the method object
LoadManagedType(*method)
return _wsdlMethodMap[(ns, wsdlName)]
else:
            raise KeyError("{0} {1}".format(ns, wsdlName)) | 00892aa048f53d35b39184d1e72ff08e6e36b51a | 19,375
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def plot_feature_wise(indicators, plot=False, show=True, ax=None, nf_max=40):
"""Plot the statistics feature-wise."""
n_mv_fw = indicators['feature-wise']
n_rows = indicators['global'].at[0, 'n_rows']
if show:
with pd.option_context('display.max_rows', None):
print(
f'\n'
f'Statistics feature-wise:\n'
f'------------------------\n'
f'\n'
f'{n_mv_fw}'
)
if plot:
# Plot proportion of missing values in each feature
# Copy index in a column for the barplot method
n_mv_fw['feature'] = n_mv_fw.index
n_mv_fw['feature_shortened'] = n_mv_fw['id'].astype(str) + ': ' + n_mv_fw.index
# Truncate
if n_mv_fw.shape[0] <= nf_max:
def truncate(string):
if len(string) <= 20:
return string
return string[:27]+'...'
n_mv_fw['feature_shortened'] = n_mv_fw['feature_shortened'].apply(truncate)
# Add the total number of values for each feature
n_mv_fw['N V'] = n_rows
# Get rid of the features with no missing values
n_mv_fw_l = n_mv_fw[(n_mv_fw['N MV1'] != 0) | (n_mv_fw['N MV2'] != 0)]
n_mv_fw_l = n_mv_fw_l.head(20)
if ax is None:
fig, ax = plt.subplots(figsize=(10, 8))
else:
fig = plt.gcf()
if n_mv_fw_l.empty:
return fig, ax
sns.set_color_codes('pastel')
sns.barplot(x='N V', y='feature_shortened', data=n_mv_fw_l, ax=ax,
color='lightgray', label=f'Not missing', dodge=False)
sns.set_color_codes('muted')
sns.barplot(x='N MV', y='feature_shortened', data=n_mv_fw_l, ax=ax,
color='b', label=f'Missing - Not applicable')
sns.set_color_codes("dark")
sns.barplot(x='N MV2', y='feature_shortened', data=n_mv_fw_l, ax=ax,
color="b", label=f'Missing - Not available')
ax.legend(ncol=1, loc='lower right', frameon=True,
title='Type of values')
ax.set(ylabel='Features', xlabel='Number of values')
ax.tick_params(labelsize=7)
sns.despine(left=True, bottom=True, ax=ax)
# Remove y labels if more than 40
if n_mv_fw_l.shape[0] > nf_max:
ax.tick_params(axis='y', which='both', left=False, labelleft=False)
fig.tight_layout(rect=(0, 0, 1, .92))
else:
fig.tight_layout(rect=(0., 0, 1, .92))
return fig, ax | 603839a5d3c78ae09d4212500e12fea92620169d | 19,376 |
import re
def normalize(string: str) -> str:
"""
Normalize a text string.
:param string: input string
:return: normalized string
"""
string = string.replace("\xef\xbb\xbf", "") # remove UTF-8 BOM
string = string.replace("\ufeff", "") # remove UTF-16 BOM
# string = unicodedata.normalize("NFKD", string) # convert to NFKD normal form
string = re.compile(r"[0-9]").sub("0", string) # map all numbers to "0"
string = re.compile(r"(?:''|``|[\"βββββ«»])").sub("'", string) # normalize quotes
string = re.compile(r"(?:[ββββ]+|-{2,})").sub("--", string) # normalize dashes
string = re.compile(r"\s+").sub(" ", string) # collapse whitespace characters
return string.strip() | 2adaeffb60af598dad40bd6f5cd7e61e6b238123 | 19,377 |
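# Hedged usage sketch for normalize (not part of the original snippet): digits
# collapse to "0", LaTeX-style quotes become "'", and whitespace runs collapse
# to single spaces.
print(normalize("Sold  45 units of ``Model X'' in 2019"))
# -> "Sold 00 units of 'Model X' in 0000"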
def get_images(image_dir: str, image_url: str = DEFAULT_IMAGE_URL):
"""Gets image.
Args:
image (str): Image filename
image_url (str): Image url
Returns:
str: Output image filename
"""
images = list_images(image_dir)
if not images and image_url is not None:
print("No images found. Downloading ...")
try:
images = [download_file(image_url)]
except RuntimeError:
print("Unable to download file ({0}).".format(image_url))
if not images:
raise RuntimeError("Unable to provide images.")
return images | 3229d08606dabef04d43251f85c3b24c02ea9d93 | 19,378 |
import string
from random import SystemRandom
def generate_key(length=128):
"""Generate a suitable client secret"""
rand = SystemRandom()
return "".join(
rand.choice(string.ascii_letters + string.digits + string.punctuation)
for x in range(length)
) | 084d691c424f08d97aeefdd6dddff1012b009bc8 | 19,379 |
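# Hedged usage sketch for generate_key (not part of the original snippet): the
# secret is random, so only its length can be checked deterministically.
secret = generate_key(32)
print(len(secret))  # -> 32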
def upload_tosca_template(file): # noqa: E501
"""upload a tosca template description file
upload and validate a tosca template description file # noqa: E501
:param file: tosca Template description
:type file: werkzeug.datastructures.FileStorage
:rtype: str
"""
res = tosca_template_service.save(file)
if res:
return res
return 'Bad Request', 400 | b1f62613acda7f0e2abf2df0d3a19792dc5cd029 | 19,381 |
def extract_data(mask, dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cor=None):
"""Get a clean sample based on mask
Parameters
----------
mask : array of boolean
mask for extract data
dra/ddc : array of float
R.A.(*cos(Dec.))/Dec. differences
dra_err/ddc_err : array of float
formal uncertainty of dra(*cos(dc_rad))/ddc
ra_rad/dc_rad : array of float
        Right ascension/Declination in radian
    ra_dc_cor : array of float, optional
        covariance/correlation coefficient between dra and ddc
Returns
----------
dra_new/ddc_new: array of float
R.A.(*cos(Dec.))/Dec for the clean sample. differences
dra_err_new/ddc_err_new: array of float
formal uncertainty of dra(*cos(dc_rad))/ddc for the clean sample
ra_rad_new/dc_rad_new: array of float
Right ascension/Declination in radian for the clean sample
ra_dc_cor_new: array of float
covariance/correlation coefficient between dra and ddc for the clean sample
"""
# Extract the clean sample
dra_new, ddc_new = dra[mask], ddc[mask]
dra_err_new, ddc_err_new = dra_err[mask], ddc_err[mask]
ra_rad_new, dc_rad_new = ra_rad[mask], dc_rad[mask]
if ra_dc_cor is None:
ra_dc_cor_new = ra_dc_cor
else:
ra_dc_cor_new = ra_dc_cor[mask]
return dra_new, ddc_new, dra_err_new, ddc_err_new, ra_rad_new, dc_rad_new, ra_dc_cor_new | 70286c6134fb19833f6033c827bb2ab2cd26afb1 | 19,382 |
import numpy as np
def holes_filler(arr_segm_with_holes, holes_label=-1, labels_sequence=(), verbose=1):
"""
Given a segmentation with holes (holes are specified by a special labels called holes_label)
the holes are filled with the closest labels around.
It applies multi_lab_segmentation_dilate_1_above_selected_label until all the holes
are filled.
:param arr_segm_with_holes:
:param holes_label:
:param labels_sequence: As multi_lab_segmentation_dilate_1_above_selected_label is not invariant
for the selected sequence.
:param verbose:
:return:
"""
num_rounds = 0
arr_segm_no_holes = np.copy(arr_segm_with_holes)
if verbose:
print('Filling holes in the segmentation:')
while holes_label in arr_segm_no_holes:
arr_segm_no_holes = multi_lab_segmentation_dilate_1_above_selected_label(arr_segm_no_holes,
selected_label=holes_label, labels_to_dilate=labels_sequence)
num_rounds += 1
if verbose:
print('Number of dilations required to remove the holes: {}'.format(num_rounds))
return arr_segm_no_holes | d008553591f1f1634aa1cf584c6639a12e8343a6 | 19,383 |
def isAbsolute(uri : str) -> bool:
""" Check whether a URI is Absolute. """
return uri is not None and uri.startswith('//') | 8597efe69ce82e80ee81beb9eb39754e90fcec28 | 19,384 |
from typing import Iterable
def token_converter(tokens: Iterable[str]) -> Iterable[str]:
"""Convert tokens."""
def convert(token: str) -> str:
return token.lower().replace("-", "_")
return map(convert, tokens) | 93bf2436c81091ec55b6a7a4d6d3fc728a68e093 | 19,385 |
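# Hedged usage sketch for token_converter (not part of the original snippet):
# tokens are lower-cased and hyphens become underscores; the lazy map object is
# materialised with list().
print(list(token_converter(["Foo-Bar", "BAZ", "x-y-z"])))
# -> ['foo_bar', 'baz', 'x_y_z']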
def update_instance(instance, validated_data):
"""Update all the instance's fields specified in the validated_data"""
for key, value in validated_data.items():
setattr(instance, key, value)
return instance.save() | 2f4d5c4ec9e524cbe348a5efad9ecae27739b339 | 19,386 |
import numpy as np
import scipy.optimize
def pupilresponse_nnls(tx, sy, event_onsets, fs, npar=10.1, tmax=930):
"""
Estimate single-event pupil responses based on canonical PRF (`pupil_kernel()`)
using non-negative least-squares (NNLS).
Parameters
-----------
tx : np.ndarray
time-vector in milliseconds
sy : np.ndarray
(baseline-corrected) pupil signal
event_onsets : list
onsets of events (stimuli/responses) in seconds
fs : float
sampling rate in Hz
npar,tmax: float
parameters for :py:func:`pypillometry.pupil.pupil_kernel()`
Returns
--------
(coef,pred,resid): tuple
coef: purely-positive regression coefficients
pred: predicted signal
resid: residuals (sy-pred)
"""
x1=pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, "estimate")
## we use a non-negative least squares solver to force the PRF-coefficients to be positive
coef=scipy.optimize.nnls(x1.T, sy)[0]
pred=np.dot(x1.T, coef) ## predicted signal
resid=sy-pred ## residual
return coef,pred,resid | afa8e02d5f5d566c9ed287c039cae16a97f3e0bf | 19,387 |
import numpy as np
def _eval_expression(
expression,
params,
x,
ind_var="x",
aux_params=None,
domain=DEFAULT_DOMAIN,
rng=DEFAULT_RANGE,
):
"""Evaluate the expression at x.
Parameters
----------
expression : string
The expression that defines the calibration function.
params : array_like
List of floating point parameters for the calibration function,
referred to by the symbol "p".
x : float or array_like
The argument at which to evaluate the expression.
ind_var : str
The symbol of the independent variable. Default "x", "y" also allowed.
aux_params : array_like
Auxiliary floating point parameters for the calibration function,
referred to by the symbol "a". By default an empty array.
domain : array_like
The domain of the function. Will raise an error if the independent
variable is outside this interval. Must be finite. By default
DEFAULT_DOMAIN.
rng : array_like
The range of the function. Expression outputs will be clipped to this
interval. Must be finite. By default DEFAULT_RANGE.
Returns
-------
y : float or array_like
Result of evaluating the expression for x.
"""
_validate_domain_range(domain, rng)
x = np.asarray(x)
if not np.all(x >= domain[0]):
raise CalibrationError(f"{ind_var} must be >= {domain[0]}: {x}")
if not np.all(x <= domain[1]):
raise CalibrationError(f"{ind_var} must be <= {domain[1]}: {x}")
if ind_var not in ["x", "y"]:
raise CalibrationError(f"Independent variable {ind_var} must be 'x' or 'y'")
if aux_params is None:
aux_params = np.array([])
else:
aux_params = np.asarray(aux_params)
safe_eval.symtable["p"] = params
safe_eval.symtable["a"] = aux_params
safe_eval.symtable[ind_var] = x
y = safe_eval(expression)
if len(safe_eval.error) > 0:
raise CalibrationError(
"asteval failed with errors:\n"
+ "\n".join(str(err.get_error()) for err in safe_eval.error)
)
if not np.all(np.isreal(y)):
raise CalibrationError(f"Function evaluation resulted in complex values: {y}")
# clip values of y to the range
y = np.clip(y, rng[0], rng[1])
return y | 2a636268412346b8f55dce8b951cff97d12410eb | 19,388 |
import numpy as np
def negative_frequency(P_m):
"""get the negative probability"""
sample_num = []
sample_prob = []
for key, value in P_m.items():
sample_num.append(key)
sample_prob.append(value)
return sample_num, np.array(sample_prob)/sum(sample_prob) | ad761b49b759ef28f721162e537419a1fe3fe40d | 19,389 |
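# Hedged usage sketch for negative_frequency (not part of the original
# snippet): counts are normalised into a probability vector that sums to 1.
nums, probs = negative_frequency({'a': 2, 'b': 6})
print(nums, probs)  # -> ['a', 'b'] [0.25 0.75]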
import pipes
def _ShellQuote(command_part):
"""Escape a part of a command to enable copy/pasting it into a shell.
"""
return pipes.quote(command_part) | 31ccd5bd64de657cd3ac5c36c643e9f2f09f2318 | 19,390 |
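# Hedged usage sketch for _ShellQuote (not part of the original snippet):
# arguments with spaces come back wrapped in single quotes so they can be
# pasted into a shell.
print(_ShellQuote("file name with spaces.txt"))  # -> 'file name with spaces.txt'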
import logging
def LogLEVEL(val):
"""
Return a sane loglevel given some value.
"""
if isinstance(val, (float, int)):
return int(val)
    if isinstance(val, str):
return getattr(logging, val.upper())
return val | 5d6f096f5d9b8ac319ab3a8b73b978cb99775a9c | 19,391 |
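# Hedged usage sketch for LogLEVEL (not part of the original snippet): both
# numeric values and level names map to the integer levels used by logging.
print(LogLEVEL("debug"), LogLEVEL(20), LogLEVEL(logging.WARNING))  # -> 10 20 30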