content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
def read_cesar_out(cesar_line):
    """Return ref and query sequence."""
    cesar_content = cesar_line.split("\n")
    # del cesar_content[0]
    fractions = parts(cesar_content, 4)
    cesar_fractions = []
    for fraction in fractions:
        if len(fraction) == 1:
            continue
        ref_seq = fraction[1]
        query_name = fraction[2][1:]
        query_seq = fraction[3]
        if len(ref_seq) != len(query_seq):
            die("Error! Ref and query sequences must have the same length!")
        elif len(ref_seq) == 0:
            die("Error! The input is empty!")
        fraction = (query_name, ref_seq, query_seq)
        cesar_fractions.append(fraction)
    return cesar_fractions | fb1a1a66647fb6d3e6fec1b27d26836067c6b023 | 14,070 |
def aa_i2c_slave_write_stats(aardvark):
    """usage: int return = aa_i2c_slave_write_stats(Aardvark aardvark)"""
    if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
    # Call API function
    return api.py_aa_i2c_slave_write_stats(aardvark) | 87e64465b1bc79c7ab48e39274e39cab17f74755 | 14,071 |
import json
def get_aliases_user(request):
    """
    Returns all the Aliases
    API_ENDPOINT: api/v1/aliases
    ----------
    payload
    {
        "email": "[email protected]"
    }
    """
    alias_array = []
    payload = {}
    print("came to get_aliases_user()")
    data_received = json.loads(request.body)
    email = data_received["email"]
    print(f"Email body:{email}")
    db_data = Aliases.objects.filter(user__email=email)
    print(f"QuerySet->{db_data}")
    for x in db_data:
        alias_array.append(x.alias)
    return JsonResponse({"alias": alias_array}, safe=False) | 2501fa15bafc2214585bd1e7d568a9a685725020 | 14,072 |
def _sorted_attributes(features, attrs, attribute):
    """
    When the list of attributes is a dictionary, use the
    'sort_key' parameter to order the feature attributes,
    and return the attribute values in that order.
    """
    sort_key = attrs.get('sort_key')
    reverse = attrs.get('reverse')
    assert sort_key is not None, "Configuration " + \
        "parameter 'sort_key' is missing, please " + \
        "check your configuration."
    # first, we find the _minimum_ ordering over the
    # group of key values. this is because we only do
    # the intersection in groups by the cutting
    # attribute, so can only sort in accordance with
    # that.
    group = dict()
    for feature in features:
        val = feature[1].get(sort_key)
        key = feature[1].get(attribute)
        val = _no_none_min(val, group.get(key))
        group[key] = val
    # extract the sorted list of attributes from the
    # grouped (attribute, order) pairs, ordering by
    # the order.
    all_attrs = sorted(group.items(),
                       key=lambda x: x[1], reverse=bool(reverse))
    # strip out the sort key in return
    return [x[0] for x in all_attrs] | 473c3d30c4fde5f00932adfb50c4d34c08324d54 | 14,073 |
import numpy as np
def ldensity_laplace_uniform_dist(prob_laplace, location, scale, low, high,
                                  val):
    """
    A mixture of a Laplace and a uniform distribution
    """
    return np.log((prob_laplace * np.exp(-np.abs(val - location) / scale) / (2 * scale))
                  + ((1 - prob_laplace) / (high - low))) | b069b2de4c2da3c69245b6a5507b61e918d5bb76 | 14,075 |
def readConfirmInput():
    """asks user for confirmation
    Returns:
        bool: True if user confirms, False if doesn't
    """
    try:
        result = readUserInput("(y/n): ")  # UnrecognisedSelectionException
        return 'y' in result[0].lower()  # IndexError
    except (UnrecognisedSelectionException, IndexError):
        return False | 007fe5e0002711db7cd0bcb1869dcbef9c667213 | 14,076 |
def linkElectron(inLep, inLepIdx, lepCollection, genPartCollection):
    """process input Electron, find lineage within gen particles
    pass "find" as inLepIdx of particle to trigger finding within the method"""
    linkChain = []
    lepIdx = -1
    if inLepIdx == "find":
        for Idx, lep in enumerate(lepCollection):
            if inLep == lep:
                lepIdx = Idx
                break
    elif -1 < inLepIdx < len(lepCollection):
        lepIdx = inLepIdx
    else:
        lepIdx = -999
    tmpMoth = inLep.genPartIdx
    # temporary deltaR with a default (only stored under logic error) and a
    # calculation against the 'head' of the chain
    tmpDeltaR = -9999.786
    if len(linkChain) > 0:
        tmpDeltaR = deltaR(inLep, linkChain[0][6])
    elif len(linkChain) == 0:
        tmpDeltaR = 0.0
    linkChain.append(("Electron", lepIdx, tmpMoth, inLep.pdgId, tmpDeltaR, inLep.genPartFlav, inLep))
    if -1 < tmpMoth < len(genPartCollection):
        __ = linkGenPart(genPartCollection[tmpMoth], tmpMoth, genPartCollection, linkChain=linkChain)
    return linkChain | 87747414f5e086f16a455dbc732f86ddcb0db630 | 14,077 |
def status():
    """Determines whether or not CrowdStrike Falcon is loaded.
    :return: A Boolean on whether or not crowdstrike is loaded.
    :rtype: bool
    .. code-block:: bash
        salt '*' crowdstrike.status
    """
    if not __salt__['crowdstrike.system_extension']():
        # if we should be using a kext, just check the kext as falconctl stats
        # can take a long time to run if falcon is already unloaded.
        if not __salt__['kext.running']('com.crowdstrike.sensor'):
            return False
    try:
        __salt__['crowdstrike.falconctl']('stats', timeout=5)
        return True
    except CommandExecutionError:
        return False | e9bdbce3e290967b95d58ddf75c2054e06542043 | 14,078 |
def sparse_search(arr, s):
    """ 10.5 Sparse Search: Given a sorted array of strings that is interspersed
    with empty strings, write a method to find the location of a given string.
    EXAMPLE:
    Input: find "ball" in {"at", "", "", "" , "ball", "", "", "car", "" , "" , "dad", ""}
    Output: 4
    """
    def spread(arr, middle, left, right):
        k = 1
        while middle - k >= left and middle + k <= right:
            if arr[middle - k] != "":
                return middle - k
            if arr[middle + k] != "":
                return middle + k
            k += 1
        return middle
    def rec_sparse_search(arr, s, left, right):
        if left > right:
            return None
        middle = (left + right) // 2
        if arr[middle] == "":
            new_middle = spread(arr, middle, left, right)
            if new_middle == middle:
                return None
            middle = new_middle
        if arr[middle] == s:
            return middle
        if arr[middle] < s:
            return rec_sparse_search(arr, s, middle + 1, right)
        return rec_sparse_search(arr, s, left, middle - 1)
    return rec_sparse_search(arr, s, 0, len(arr) - 1) | 605a56c518539117a83382c9e73d37d5e56b535f | 14,079 |
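A quick usage sketch for sparse_search above, using the docstring's own example (plain Python, no dependencies):

arr = ["at", "", "", "", "ball", "", "", "car", "", "", "dad", ""]
print(sparse_search(arr, "ball"))  # 4
print(sparse_search(arr, "car"))   # 7
print(sparse_search(arr, "zoo"))   # None (not present)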
import uuid
def uuid_pk():
    """
    Generate uuid1 and cut it to 12.
    UUID default size is 32 chars.
    """
    return uuid.uuid1().hex[:12] | 9efb12a6e72b02adcd4a64ca721ceab8c688055a | 14,080 |
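A minimal usage sketch for uuid_pk(); the value varies per call since uuid1 encodes the timestamp and node, so the shown output is hypothetical:

key = uuid_pk()
print(key)       # e.g. '1c9a2f4e8b02' (12 hex chars)
print(len(key))  # 12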
def infected_symptomatic_00x80():
    """
    Real Name: b'Infected symptomatic 00x80'
    Original Eqn: b'Infected symptomatic 00+Infected symptomatic 80'
    Units: b'person'
    Limits: (None, None)
    Type: component
    b''
    """
    return infected_symptomatic_00() + infected_symptomatic_80() | 0a3500659fad466c92fcd3d073003094c56efe9d | 14,081 |
def stencilCompare(firstElem, secondElem):
    """
    stencilCompare(const std::pair< int, FP_PRECISION > &firstElem, const std::pair< int,
    FP_PRECISION > &secondElem) -> bool
    Comparator for sorting k-nearest stencil std::pair objects
    """
    return _openmoc.stencilCompare(firstElem, secondElem) | 3eda1a57e521134e77ba55ae38771f151699fdfd | 14,082 |
import random
def bisect_profiles_wrapper(decider, good, bad, perform_check=True):
    """Wrapper for recursive profile bisection."""
    # Validate good and bad profiles are such, otherwise bisection reports noise
    # Note that while decider is a random mock, these assertions may fail.
    if perform_check:
        if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS:
            raise ValueError('Supplied good profile is not actually GOOD')
        if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS:
            raise ValueError('Supplied bad profile is not actually BAD')
    common_funcs = sorted(func for func in good if func in bad)
    if not common_funcs:
        return {'ranges': [], 'individuals': []}
    # shuffle because the results of our analysis can be quite order-dependent
    # but this list has no inherent ordering. By shuffling each time, the chances
    # of finding new, potentially interesting results are increased each time
    # the program is run
    random.shuffle(common_funcs)
    results = bisect_profiles(decider, good, bad, common_funcs, 0,
                              len(common_funcs))
    results['ranges'].sort()
    results['individuals'].sort()
    return results | 8fbe2f018c7dfb7fdeb71dd5080993a9773a41d7 | 14,083 |
import typing
def rolling_median_with_nan_forward_fill(vector: typing.List[float], window_length: int) -> typing.List[float]:
    """Computes a rolling median of a vector of floats and returns the results. NaNs will be forward filled."""
    forward_fill(vector)
    return rolling_median_no_nan(vector, window_length) | 708cd1f6371846ea3b7acb4d7b59a7a61f85de7c | 14,084 |
from typing import Optional
from typing import Dict
def Subprocess(
    identifier: Optional[str] = None, variables: Optional[Dict] = None,
    env: Optional[Dict] = None, volume: Optional[str] = None
) -> Dict:
    """Get base configuration for a subprocess worker with the given optional
    arguments.
    Parameters
    ----------
    identifier: string, default=None
        Unique worker identifier. If no identifier is given, a new unique
        identifier will be generated.
    variables: dict, default=None
        Mapping with default values for placeholders in command template
        strings.
    env: dict, default=None
        Default settings for environment variables when executing workflow
        steps. These settings can get overridden by step-specific settings.
    volume: string, default=None
        Identifier for the storage volume that the worker has access to.
    Returns
    -------
    dict
    """
    return WorkerSpec(
        worker_type=SUBPROCESS_WORKER,
        variables=variables,
        env=env,
        identifier=identifier,
        volume=volume
    ) | 97a90179c91ec862c6008e12ae6c12368ec301c5 | 14,086 |
from django.conf import settings
def get_var(name):
    """
    Returns the value of a settings variable.
    The full name is CONTROLLED_VOCABULARY_ + name.
    First look into django settings.
    If not found there, use the value defined in this file.
    """
    full_name = "CONTROLLED_VOCABULARY_" + name
    ret = globals().get(full_name, None)
    ret = getattr(settings, full_name, ret)
    return ret | 3c7b5507a387917b9639510023948571160b5973 | 14,087 |
def get_schema_from_dataset_url_carbon(dataset_url,
                                       key=None,
                                       secret=None,
                                       endpoint=None,
                                       proxy=None,
                                       proxy_port=None,
                                       filesystem=None):
    """Returns a :class:`petastorm.unischema.Unischema` object loaded from a dataset specified by a url.
    :param dataset_url: A dataset URL
    :param key: access key
    :param secret: secret key
    :param endpoint: endpoint_url
    :param proxy: proxy
    :param proxy_port: proxy_port
    :param filesystem: filesystem
    :return: A :class:`petastorm.unischema.Unischema` object
    """
    # Get a unischema stored in the dataset metadata.
    stored_schema = get_schema_carbon(CarbonDataset(dataset_url,
                                                    key=key,
                                                    secret=secret,
                                                    endpoint=endpoint,
                                                    proxy=proxy,
                                                    proxy_port=proxy_port,
                                                    filesystem=filesystem))
    return stored_schema | 2c562e39232dfbe1ac7359d0c88bd2a1efa5a334 | 14,088 |
import time
import numpy as np
from sklearn.metrics import (accuracy_score, confusion_matrix,
                             precision_score, recall_score, roc_auc_score)
def get_all_metrics(model, epoch, val_x, val_y, start_time, loss_fn):
    """Predict on the validation set after each epoch and compute some metrics.
    :param model: tf.keras.Model, the model after this epoch's training
    :param epoch: int, epoch number
    :param val_x: tf.data.Dataset, validation inputs, same sample_size as val_y
    :param val_y: tf.data.Dataset, validation labels
    :param start_time: time.time, start time
    :param loss_fn: loss function
    :return: the model's loss on the validation set
    """
    y_pred_val, y_true_val = [], []
    loss_val = 0
    sample_size_val = 0
    for x_tmp, y_tmp in zip(val_x.as_numpy_iterator(), val_y.as_numpy_iterator()):
        pred_tmp = model.predict(x_tmp)
        y_pred_val.append(pred_tmp)
        y_true_val.append(y_tmp)
        loss_tmp = loss_fn(y_tmp, pred_tmp)
        loss_val += np.sum(loss_tmp)
        sample_size_val += x_tmp[0].shape[0]
    # compute the loss
    loss_val /= sample_size_val
    # compute the AUC
    y_pred = np.concatenate(y_pred_val).astype(dtype=float)
    y_true = np.concatenate(y_true_val).astype(dtype=float)
    roc_auc_val = roc_auc_score(y_true, y_pred)
    # convert predicted probabilities into class labels
    y_pred = np.where(y_pred > 0.5, np.ones_like(y_pred), np.zeros_like(y_pred))
    # compute confusion-matrix-based metrics
    recall = recall_score(y_true=y_true, y_pred=y_pred)
    precision = precision_score(y_true=y_true, y_pred=y_pred)
    accuracy = accuracy_score(y_true=y_true, y_pred=y_pred)
    line = f"""For epoch {epoch}, on val set loss is {round(loss_val, 5)}, auc is {round(roc_auc_val, 4)},
    recall is {round(recall, 4)}, precision is {round(precision, 4)}, accuracy is {round(accuracy, 4)},
    confusion_matrix is {confusion_matrix(y_true=y_true, y_pred=y_pred)}"""
    line += f", time elapsed {(time.time() - start_time) / 60} mins"
    print("HZJ info: ", line)
    return loss_val | 24025cbdcc702ca32c5887f1ec2ccf424d492e69 | 14,089 |
def get_labels(decode_steps: DecodeSteps) -> LabelsDict:
    """Returns labels dict given DecodeSteps."""
    return {
        "target_action_types": decode_steps.action_types,
        "target_action_ids": decode_steps.action_ids,
    } | 66047e41b3d173e53b676a60e48647e3862aac16 | 14,090 |
def get_body_barycentric_posvel(body, time, ephemeris=None):
    """Calculate the barycentric position and velocity of a solar system body.
    Parameters
    ----------
    body : str or other
        The solar system body for which to calculate positions. Can also be a
        kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
        kernel.
    time : `~astropy.time.Time`
        Time of observation.
    ephemeris : str, optional
        Ephemeris to use. By default, use the one set with
        ``astropy.coordinates.solar_system_ephemeris.set``
    Returns
    -------
    position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
        Tuple of barycentric (ICRS) position and velocity.
    See also
    --------
    get_body_barycentric : to calculate position only.
        This is faster by about a factor two for JPL kernels, but has no
        speed advantage for the built-in ephemeris.
    Notes
    -----
    The velocity cannot be calculated for the Moon. To just get the position,
    use :func:`~astropy.coordinates.get_body_barycentric`.
    """
    return _get_body_barycentric_posvel(body, time, ephemeris) | 41be03294a5cd21163afae9650f556fc64257110 | 14,091 |
def recurDraw(num, data):
    """
    Purpose: to draw polygons
    Parameters: num - indicator of what layer the program is on, data - instance
        of the Data class
    Returns: data - instance of the data class
    Calls: recurDraw - itself, Data - data processing class, toDraw - drawing
        intermediary function
    """
    if num == 0:
        return num
    num -= 1
    data = recurDraw(num, data)
    data = Data(num, data)
    toDraw(data)
    return data | d94d2f250396b6acfcf02306fd78b180f070aa92 | 14,092 |
def cont4():
    """
    Two clusters, namely <cont1> (5 contours) and <cont3> (4 contours).
    The enclosing contours of the clusters have a different value.
    Contains 3 minima.
    """
    cont_min = [
        cncc(5, (6.00, 3.00), 0.2, (1, 1)),
        cncc(2, (7.00, 4.00), 0.1, (4, 1), rmin=0.15),
        cncc(2, (6.25, 3.25), 0.3, (6, 1), rmin=1.6, no_min=True),
        cncc(5, (3.00, 3.00), 0.2, (1, 1)),
    ]
    cont = [e for lst in cont_min for e in lst[0]]
    min = [e for lst in cont_min for e in lst[1]]
    return cont, min | c83cb48c3bc257dcf1ead50312d186464acdd57d | 14,093 |
def predict(test_data, qrnn, add_noise=False):
    """
    predict the posterior mean and median
    """
    if add_noise:
        x_noise = test_data.add_noise(test_data.x, test_data.index)
        x = (x_noise - test_data.mean) / test_data.std
        y_prior = x_noise
        y = test_data.y_noise
        y0 = test_data.y
    else:
        x = (test_data.x - test_data.mean) / test_data.std
        y_prior = test_data.x
        y = test_data.y
        y0 = test_data.y0
    y_pre = qrnn.predict(x.data)
    y_pos_mean = qrnn.posterior_mean(x.data)
    return y_pre, y_prior, y0, y, y_pos_mean, x.data | d45e843d529babb99baa160ad976c0c9753da42d | 14,094 |
def handle_login_GET():
    """
    Displays the index (the login page).
    """
    if request.args.get('next'):
        url_kwargs = dict(next=request.args.get('next'))
    else:
        url_kwargs = {}
    try:
        weblab_api.api.check_user_session()
    except SessionNotFoundError:
        pass  # Expected behavior
    else:
        # User is already logged in, send him to the next url
        return redirect(get_next_url())
    return render_template("webclient/login.html", url_kwargs=url_kwargs) | f496519518b5d3b8a71ff4a8e60be2a2fe2110f3 | 14,095 |
import copy
def get_role_actions():
    """Returns the possible role to actions items in the application.
    Returns:
        dict(str, list(str)). A dict presenting key as role and values as list
        of actions corresponding to the given role.
    """
    return copy.deepcopy(_ROLE_ACTIONS) | 79b53e4003b1dc9264d9210f03395ce32d737c1e | 14,096 |
import json
def jsons_str_tuple_to_jsons_tuple(ctx, param, value):
    """
    Converts a sequence of JSON strings into a list of Python objects
    """
    if value is None:
        return []
    else:
        return [json.loads(a) for a in value] | 8b6f03650d566d74b0400868f12b59c2fa37bc3e | 14,097 |
def voucher_and_partial_matches_with_coupons(voucher_and_partial_matches):
    """
    Returns a voucher with partial matching CourseRuns and valid coupons
    """
    context = voucher_and_partial_matches
    products = [
        ProductFactory(content_object=course_run)
        for course_run in context.partial_matches
    ]
    coupon_eligibility_list = [
        CouponEligibilityFactory(product=product) for product in products
    ]
    payment_versions = [
        CouponPaymentVersionFactory(amount=1, company=context.company)
        for _ in coupon_eligibility_list
    ]
    coupon_versions = [
        CouponVersionFactory(
            coupon=coupon_eligibility_list[i].coupon,
            payment_version=payment_versions[i],
        )
        for i in range(len(coupon_eligibility_list))
    ]
    return SimpleNamespace(
        **vars(voucher_and_partial_matches),
        products=products,
        coupon_eligibility_list=coupon_eligibility_list,
        coupon_versions=coupon_versions,
        payment_versions=payment_versions,
    ) | 4f9e5732b0f3863504dec2aeef1309c0c24abc77 | 14,100 |
import batman
import numpy as np
def one_transit(t=np.linspace(0, 27, 19440),
                per=1., rp=0.1, t0=1., a=15., inc=87., ecc=0.,
                w=90., limb_dark='nonlinear', u=[0.5, 0.1, 0.1, -0.1]):
    """
    ~Simulates a one-sector long TESS light curve with injected planet transits per input parameters.~
    Requires: batman; numpy
    Args: t =times at which to calculate light curve, default is one TESS sector;
        per =orbital period;
        rp =planet radius (in units of stellar radii);
        t0 =time of inferior conjunction;
        a =semi-major axis (in units of stellar radii);
        inc =orbital inclination (in degrees);
        ecc =eccentricity;
        w =longitude of periastron (in degrees);
        limb_dark =limb darkening model;
        u =limb darkening coefficients [u1, u2, u3, u4];
    outputs: flux array =light curve with one injected transit at per, for use right before sim_lc to get TESS lc
    """
    #### maybe should make params its own fcn and split this fcn into 2....
    params = batman.TransitParams(); params.t0 = t0; params.per = per
    params.rp = rp; params.a = a; params.inc = inc; params.ecc = ecc
    params.w = w; params.limb_dark = limb_dark; params.u = u
    m = batman.TransitModel(params, t)  # initializes model
    flux = m.light_curve(params)  # calculates light curve
    return flux, m, params | 4bb9a59e307cdab7554c10ae952279588c47bd94 | 14,101 |
def activate(request: Request) -> dict:
    """View to activate user after clicking email link.
    :param request: Pyramid request.
    :return: Context to be used by the renderer.
    """
    code = request.matchdict.get('code', None)
    registration_service = get_registration_service(request)
    return registration_service.activate_by_email(code) | ccc543ff740d3c7ebbe7e0404c0ef6a7fc310866 | 14,103 |
import numpy as np
from scipy import stats as scipy_stats
def _create_eval_metrics_fn(
    dataset_name, is_regression_task
):
    """Creates a function that computes task-relevant metrics.
    Args:
        dataset_name: TFDS name of dataset.
        is_regression_task: If true, includes Spearman's rank correlation
            coefficient computation in metric function; otherwise, defaults to
            accuracy computation.
    Returns:
        Relevant metric function.
    """
    def get_accuracy(guess, gold):
        return (guess == gold).mean()
    def get_mcc(guess, gold):
        tp = ((guess == 1) & (gold == 1)).sum()
        tn = ((guess == 0) & (gold == 0)).sum()
        fp = ((guess == 1) & (gold == 0)).sum()
        fn = ((guess == 0) & (gold == 1)).sum()
        mcc_denom = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        mcc = (tp * tn - fp * fn) / (mcc_denom + 1e-6)
        return mcc
    def get_f1(guess, gold):
        tp = ((guess == 1) & (gold == 1)).sum()
        fp = ((guess == 1) & (gold == 0)).sum()
        fn = ((guess == 0) & (gold == 1)).sum()
        f1 = (2 * tp) / (2 * tp + fp + fn + 1e-6)
        return f1
    def get_f1_accuracy_mean(guess, gold):
        return (get_f1(guess, gold) + get_accuracy(guess, gold)) / 2.0
    def get_spearmanr(x, y):
        return scipy_stats.spearmanr(x, y).correlation
    eval_metrics = {}
    if is_regression_task:
        eval_metrics["spearmanr"] = get_spearmanr
    else:
        eval_metrics["accuracy"] = get_accuracy
    if dataset_name == "glue/cola":
        eval_metrics["mcc"] = get_mcc
    elif dataset_name in ("glue/mrpc", "glue/qqp"):
        eval_metrics["f1_accuracy_mean"] = get_f1_accuracy_mean
    def metrics_fn(stats):
        res = {}
        for name, fn in eval_metrics.items():
            res[name] = fn(stats["prediction"], stats["label"])
        return res
    return metrics_fn | 732baaf729739d7150f09185233efaa873045605 | 14,104 |
import numpy as np
def brighter(rgb):
    """
    Make the color (rgb-tuple) a tad brighter.
    """
    _rgb = tuple([int(np.sqrt(a / 255) * 255) for a in rgb])
    return _rgb | f1d6ba4deea3896ce6754d622913b7f2d2af91e4 | 14,105 |
def delete_workspace_config(namespace, workspace, cnamespace, config):
    """Delete method configuration in workspace.
    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        cnamespace (str): Method configuration namespace
        config (str): Method configuration name
    Swagger:
        https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
    """
    uri = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(namespace,
                                                             workspace, cnamespace, config)
    return __delete(uri) | e86106fc6eabc0f1ae703e31abe3283e9df3e31b | 14,106 |
import math
def test_filled_transparent_graphs_2():
    """ Two functions with transparent grid over them """
    coordinate_system = cartesius.CoordinateSystem()
    coordinate_system.add(
        charts.Function(
            math.sin,
            start=-4,
            end=5,
            step=0.02,
            fill_color=(0, 0, 255),
            transparency_mask=100))
    coordinate_system.add(
        charts.Function(
            math.cos,
            start=-4,
            end=5,
            step=0.02,
            fill_color=(200, 255, 200),
            transparency_mask=100))
    coordinate_system.add(elements.Grid(1, 1, transparency_mask=140))
    return coordinate_system.draw(300, 200), coordinate_system.draw(300, 200, antialiasing=True) | 9cc51358b2e92a869ea318ac8d18f3b9ea988012 | 14,107 |
import os.path as op
def get_shader_code(name):
    """ Returns the shader as a string """
    fname = op.join(op.dirname(__file__), name)
    if op.exists(fname):
        with open(fname) as f:
            return f.read() | bdd21d6c36b5e71608d48ecdce32adb79bb58428 | 14,108 |
import torch
import torch.nn.functional as F
def compute_translation_error(pred_pose, gt_pose, reduction="mean"):
    """
    Computes the error (meters) in translation components of pose prediction.
    Inputs:
        pred_pose - (bs, 3) --- (x, y, theta)
        gt_pose   - (bs, 3) --- (x, y, theta)
    Note: x, y must be in meters.
    """
    error = torch.sqrt(
        F.mse_loss(pred_pose[:, :2], gt_pose[:, :2], reduction=reduction)
    )
    return error | e1e0863c37a3c42e3081d5b21f529172315ccb66 | 14,109 |
def get_base_snippet_action_menu_items(model):
    """
    Retrieve the global list of menu items for the snippet action menu,
    which may then be customised on a per-request basis
    """
    menu_items = [
        SaveMenuItem(order=0),
        DeleteMenuItem(order=10),
    ]
    for hook in hooks.get_hooks('register_snippet_action_menu_item'):
        action_menu_item = hook(model)
        if action_menu_item:
            menu_items.append(action_menu_item)
    return menu_items | d741097c3e75764578e3f1aa6cc33cb194a40b42 | 14,110 |
def assign_file(package, source):
    """Initializes package output class.
    Parameters
    ----------
    package : :obj:`str`
        Name of the package that generated the trajectory file.
    source : :obj:`str`
        Path to the trajectory file.
    Returns
    -------
    The class corresponding to the correct package.
    """
    if package.lower() == 'gamess':
        return GAMESS(source)
    else:
        raise ValueError(f'{package} is not supported.') | 70f01dc69ef87738fd87b6c321787d7159a85e3a | 14,111 |
import logging
def _magpie_register_services_with_db_session(services_dict, db_session, push_to_phoenix=False,
                                              force_update=False, update_getcapabilities_permissions=False):
    # type: (ServicesSettings, Session, bool, bool, bool) -> bool
    """
    Registration procedure of :term:`Services` from ``providers`` section using pre-established database session.
    .. seealso::
        :func:`magpie_register_services_from_config`
    """
    db_session.begin(subtransactions=True)
    existing_services_names = [n[0] for n in db_session.query(models.Service.resource_name)]
    magpie_anonymous_user = get_constant("MAGPIE_ANONYMOUS_USER")
    anonymous_user = UserService.by_user_name(magpie_anonymous_user, db_session=db_session)
    for svc_name, svc_values in services_dict.items():
        svc_new_url = svc_values["url"]
        svc_type = svc_values["type"]
        svc_config = svc_values.get("configuration")
        svc_sync_type = svc_values.get("sync_type")
        if force_update and svc_name in existing_services_names:
            svc = models.Service.by_service_name(svc_name, db_session=db_session)
            if svc.url == svc_new_url:
                print_log("Service URL already properly set [{url}] ({svc})"
                          .format(url=svc.url, svc=svc_name), logger=LOGGER)
            else:
                print_log("Service URL update [{url_old}] => [{url_new}] ({svc})"
                          .format(url_old=svc.url, url_new=svc_new_url, svc=svc_name), logger=LOGGER)
                svc.url = svc_new_url
            svc.sync_type = svc_sync_type
            svc.configuration = svc_config
        elif not force_update and svc_name in existing_services_names:
            print_log("Skipping service [{svc}] (conflict)".format(svc=svc_name), logger=LOGGER)
        else:
            print_log("Adding service [{svc}]".format(svc=svc_name), logger=LOGGER)
            svc = models.Service(
                resource_name=svc_name,
                resource_type=models.Service.resource_type_name,
                url=svc_new_url,
                type=svc_type,
                configuration=svc_config,
                sync_type=svc_sync_type
            )
            db_session.add(svc)
        getcap_perm = Permission.GET_CAPABILITIES
        if update_getcapabilities_permissions and anonymous_user is None:
            print_log("Cannot update 'getcapabilities' permission of non existing anonymous user",
                      level=logging.WARN, logger=LOGGER)
        elif update_getcapabilities_permissions and getcap_perm in SERVICE_TYPE_DICT[svc_type].permissions:
            svc = db_session.query(models.Service.resource_id).filter_by(resource_name=svc_name).first()
            svc_perm_getcapabilities = UserResourcePermissionService.by_resource_user_and_perm(
                user_id=anonymous_user.id,
                perm_name=getcap_perm.value,
                resource_id=svc.resource_id,
                db_session=db_session
            )
            if svc_perm_getcapabilities is None:
                print_log("Adding '{}' permission to anonymous user.".format(getcap_perm.value), logger=LOGGER)
                svc_perm_getcapabilities = models.UserResourcePermission(
                    user_id=anonymous_user.id,
                    perm_name=getcap_perm.value,
                    resource_id=svc.resource_id
                )
                db_session.add(svc_perm_getcapabilities)
    transaction.commit()
    if push_to_phoenix:
        return _phoenix_update_services(services_dict)
    return True | b2f96f213f1ab84e7be56788a1b4dad6d93dbe16 | 14,112 |
def journal(client):
    """
    Fetch journal entries which reference a member.
    """
    client.require_auth()
    with (yield from nwdb.connection()) as conn:
        cursor = yield from conn.cursor()
        yield from cursor.execute("""
            select A.tx_id, A.wallet_id, A.debit, A.credit, B.currency_id, C.narrative
            from journal A
            inner join wallet B on B.id = A.wallet_id
            inner join wallet_transaction C on C.id = A.tx_id
            where B.member_id = %s
            order by C.created
        """, [client.session["member_id"]])
        rs = yield from cursor.fetchall()
        return [dict(i) for i in rs] | 06eada531634f25ba076114b3858eae0a75b1807 | 14,113 |
def triplet_to_rrggbb(rgbtuple):
    """Converts a (red, green, blue) tuple to #rrggbb."""
    hexname = _tripdict.get(rgbtuple)
    if hexname is None:
        hexname = '#%02x%02x%02x' % rgbtuple
        _tripdict[rgbtuple] = hexname
    return hexname | 9ba66d9aadb8385726178b32d69e14adfe380229 | 14,114 |
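A usage sketch for triplet_to_rrggbb, assuming _tripdict is the module-level cache dict it memoizes into (shown here initialized empty):

_tripdict = {}
print(triplet_to_rrggbb((255, 0, 0)))   # '#ff0000'
print(triplet_to_rrggbb((18, 52, 86)))  # '#123456' (and now cached in _tripdict)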
import math
def stab_cholesky(M):
    """ A numerically stable version of the Cholesky decomposition.
    Used in the GLE implementation. Since many of the matrices used in this
    algorithm have very large and very small numbers in at once, to handle a
    wide range of frequencies, a naive algorithm can end up having to calculate
    the square root of a negative number, which breaks the algorithm. This is
    due to numerical precision errors turning a very tiny positive eigenvalue
    into a tiny negative value.
    Instead of this, an LDU decomposition is used, and any small negative numbers
    in the diagonal D matrix are assumed to be due to numerical precision errors,
    and so are replaced with zero.
    Args:
        M: The matrix to be decomposed.
    """
    n = M.shape[1]
    D = np.zeros(n, float)
    L = np.zeros(M.shape, float)
    for i in range(n):
        L[i, i] = 1.
        for j in range(i):
            L[i, j] = M[i, j]
            for k in range(j):
                L[i, j] -= L[i, k] * L[j, k] * D[k]
            if (not D[j] == 0.0):
                L[i, j] = L[i, j] / D[j]
        D[i] = M[i, i]
        for k in range(i):
            D[i] -= L[i, k] * L[i, k] * D[k]
    S = np.zeros(M.shape, float)
    for i in range(n):
        if (D[i] > 0):
            D[i] = math.sqrt(D[i])
        else:
            warning("Zeroing negative element in stab-cholesky decomposition: " + str(D[i]), verbosity.low)
            D[i] = 0
        for j in range(i + 1):
            S[i, j] += L[i, j] * D[j]
    return S | 73f2989bb77513090b8ccbcf99b5f31a3aab9115 | 14,115 |
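A quick sanity check of stab_cholesky on a symmetric positive-definite matrix; for SPD input the negative-eigenvalue branch (and the codebase-specific warning/verbosity helpers it calls) is never reached, and the returned factor S satisfies S S^T = M:

import numpy as np
A = np.random.rand(4, 4)
M = A @ A.T + 4 * np.eye(4)  # symmetric positive definite
S = stab_cholesky(M)
print(np.allclose(S @ S.T, M))  # True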
from datetime import datetime, timedelta
import json
def prodNeventsTrend(request):
    """
    The view presents historical trend of nevents in different states for various processing types
    Default time window - 1 week
    """
    valid, response = initRequest(request)
    defaultdays = 7
    equery = {}
    if 'days' in request.session['requestParams'] and request.session['requestParams']['days']:
        try:
            days = int(request.session['requestParams']['days'])
        except:
            days = defaultdays
        starttime = datetime.now() - timedelta(days=days)
        endtime = datetime.now()
        request.session['requestParams']['days'] = days
    else:
        starttime = datetime.now() - timedelta(days=defaultdays)
        endtime = datetime.now()
        request.session['requestParams']['days'] = defaultdays
    equery['timestamp__range'] = [starttime, endtime]
    if 'processingtype' in request.session['requestParams'] and request.session['requestParams']['processingtype']:
        if '|' not in request.session['requestParams']['processingtype']:
            equery['processingtype'] = request.session['requestParams']['processingtype']
        else:
            pts = request.session['requestParams']['processingtype'].split('|')
            equery['processingtype__in'] = pts
    events = ProdNeventsHistory.objects.filter(**equery).values()
    timeline = set([ev['timestamp'] for ev in events])
    timelinestr = [datetime.strftime(ts, defaultDatetimeFormat) for ts in timeline]
    if 'view' in request.session['requestParams'] and request.session['requestParams']['view'] and request.session['requestParams']['view'] == 'separated':
        view = request.session['requestParams']['view']
    else:
        view = 'joint'
    plot_data = []
    if view == 'joint':
        ev_states = ['running', 'waiting']
        data = {}
        for es in ev_states:
            data[es] = {}
            for ts in timelinestr:
                data[es][ts] = 0
        for ev in events:
            for es in ev_states:
                data[es][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(es)]
    else:
        processingtypes = set([ev['processingtype'] for ev in events])
        ev_states = ['running', 'waiting']
        lines = []
        for prtype in processingtypes:
            for evst in ev_states:
                lines.append(str(prtype + '_' + evst))
        if len(processingtypes) > 1:
            lines.append('total_running')
            lines.append('total_waiting')
        data = {}
        for l in lines:
            data[l] = {}
            for ts in timelinestr:
                data[l][ts] = 0
        for ev in events:
            for l in lines:
                if ev['processingtype'] in l:
                    data[l][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(l.split('_')[1])]
                if l.startswith('total'):
                    data[l][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(l.split('_')[1])]
    for key, value in data.items():
        newDict = {'state': key, 'values': []}
        for ts, nevents in value.items():
            newDict['values'].append({'timestamp': ts, 'nevents': nevents})
        newDict['values'] = sorted(newDict['values'], key=lambda k: k['timestamp'])
        plot_data.append(newDict)
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        plot_data_list = [['timestamp'], ]
        plot_data_list[0].extend([point['timestamp'] for point in plot_data[0]['values']])
        for i, line in enumerate(plot_data):
            plot_data_list.append([line['state']])
            plot_data_list[i + 1].extend([point['nevents'] for point in plot_data[i]['values']])
        dump = json.dumps(plot_data_list, cls=DateEncoder)
        return HttpResponse(dump, content_type='application/json')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'built': datetime.now().strftime("%H:%M:%S"),
            'plotData': json.dumps(plot_data)
        }
        response = render_to_response('prodNeventsTrend.html', data, content_type='text/html')
        setCacheEntry(request, "prodNeventsTrend", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response | 55b7fcbf352e98b01ce4146ff5b5984c86c435d3 | 14,116 |
def create_storage_policy_zios(session, cloud_name, zios_id, policy_name, drive_type, drive_quantity,
                               policy_type_id, description=None, return_type=None, **kwargs):
    """
    Creates a new policy to ZIOS.
    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.
    :type cloud_name: str
    :param cloud_name: The cloud 'name' as returned by get_all_clouds. For
        example: 'zadaralab01'. Required.
    :type zios_id: int
    :param zios_id: The ZIOS 'id' value as returned by get_all_zios_objects. Required.
    :type policy_name: str
    :param policy_name: Policy name. Required
    :type drive_type: str
    :param drive_type: Drive type internal name. Required
    :type drive_quantity: int
    :param drive_quantity: Number of drives to add. Required.
    :type policy_type_id: int
    :param policy_type_id: Storage policy type id. Required.
    :type description: str
    :param description: Policy description
    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).
    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    zios_id = verify_zios_id(zios_id)
    cloud_name = verify_cloud_name(cloud_name)
    drive_type = verify_field(drive_type, 'drive_type')
    drive_quantity = verify_capacity(drive_quantity, 'drive_quantity')
    policy_type_id = verify_capacity(policy_type_id, 'policy_type_id')
    body_values = {"name": policy_name, "drive_type": drive_type,
                   "drive_quantity": drive_quantity, "policy_type_id": policy_type_id}
    if description is not None:
        body_values["description"] = description
    path = "/api/clouds/{0}/zioses/{1}/policy.json".format(cloud_name, zios_id)
    return session.post_api(path=path, body=body_values, return_type=return_type, **kwargs) | 16cec8686df1fb634064e75478364285dcfc3c1d | 14,117 |
import networkx as nx
import numpy as np
def format_tooltips(G, **kwargs):
    """ Annotate G, format tooltips.
    """
    # node data = [(n, {...}), ...]
    node_data = {}
    if isinstance(G, nx.Graph):
        node_data = G.nodes(True)
    elif 'nodes' in G:
        node_data = [(d["id"], d) for d in G['nodes']]
    # unique ids
    member_uids = np.sort(np.unique([
        __ for n, d in node_data for __ in d['members']
    ]))
    # array of tooltips
    node_tooltips = []
    for n, d in node_data:
        # progress
        print("Formatting tooltip... NodeID:", n)
        member_ids = d['members']
        # member images
        images = d['image'][member_ids]
        images = [IMG_HTML.format(src=_) for _ in images]
        # format tooltip for node
        node_tooltip = NODE_HTML.format(
            node_id=n, node_name=d['name'],
            node_size=len(member_ids),
            data_size=len(member_uids),
            images=images
        )
        # add to array
        node_tooltips.append(node_tooltip)
    # make numpy array
    return np.array(node_tooltips) | cfbfc3012dffce017110288847f3bcefa4612645 | 14,118 |
def add_centroid_frags(fragList, atmList):
    """Add centroid to each fragment."""
    for frag in fragList:
        atoms = [atmList[i] for i in frag['ids']]
        frag['cx'], frag['cy'], frag['cz'] = centroid_atmList(atoms)
    return fragList | 1f050fcf0b60a7bb62d6d5be844b9a895e91fc7f | 14,120 |
import numpy as np
from scipy.ndimage import sobel
def _apply_sobel(img_matrix):
    """
    Input: img_matrix(height, width) with type float32
    Convolves the image with sobel mask and returns the magnitude
    """
    dx = sobel(img_matrix, 1)
    dy = sobel(img_matrix, 0)
    grad_mag = np.hypot(dx, dy)  # Calculates sqrt(dx^2 + dy^2)
    grad_mag *= 255 / grad_mag.max()  # Normalize the gradient magnitudes
    return grad_mag | 5c297cf822e1d5cba092070ecb52f57b1dbe720b | 14,122 |
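A usage sketch for _apply_sobel on a synthetic step edge; the normalized gradient magnitude peaks at the edge and is zero in the flat regions:

img = np.zeros((8, 8), dtype=np.float32)
img[:, 4:] = 255.0          # vertical step edge between columns 3 and 4
edges = _apply_sobel(img)
print(edges.max())          # 255.0 (peak normalized to 255 at the edge)
print(edges[:, :2].max())   # 0.0 (flat region)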
def isDeleted(doc_ref):
    """
    Checks if document is logically deleted, i.e. has a deleted timestamp.
    Returns: boolean
    """
    return exists(doc_ref) and 'ts_deleted' in get_doc(doc_ref) | 0c7357357edfc645c771acbe40730cb4668fe13e | 14,123 |
from typing import Optional
def sys_wait_for_event(
    mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool
) -> int:
    """Wait for an event then return.
    If flush is True then the buffer will be cleared before waiting. Otherwise
    each available event will be returned in the order they're received.
    Args:
        mask (int): :any:`Event types` to wait for.
        k (Optional[Key]): A tcod.Key instance which might be updated with
            an event. Can be None.
        m (Optional[Mouse]): A tcod.Mouse instance which might be updated
            with an event. Can be None.
        flush (bool): Clear the event buffer before waiting.
    .. deprecated:: 9.3
        Use the :any:`tcod.event.wait` function to wait for events.
    """
    return int(
        lib.TCOD_sys_wait_for_event(
            mask,
            k.key_p if k else ffi.NULL,
            m.mouse_p if m else ffi.NULL,
            flush,
        )
    ) | 4c0ba8f8b49f0f0dc837739afb46f667785b8a8c | 14,124 |
import flask
def get_test():
    """
    Return test data.
    """
    context = {}
    context['test'] = 'this is a test message'
    return flask.jsonify(**context) | 01f99a070a61414461d9a407574591f715ca5c63 | 14,125 |
def num_poisson_events(rate, period, rng=None):
    """
    Returns the number of events that have occurred in a Poisson
    process of ``rate`` over ``period``.
    """
    if rng is None:
        rng = GLOBAL_RNG
    events = 0
    while period > 0:
        time_to_next = rng.expovariate(1.0 / rate)
        if time_to_next <= period:
            events = events + 1
        period = period - time_to_next
    return events | 0f2378040bcf6193507bd15cb01c9e753e5c5235 | 14,126 |
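A usage sketch for num_poisson_events with an explicit RNG (sidestepping the module-level GLOBAL_RNG). Note that random.expovariate(lambd) draws intervals with mean 1/lambd, so with lambd = 1.0/rate the mean inter-event time here is rate and the expected count is roughly period/rate:

import random
rng = random.Random(42)
counts = [num_poisson_events(rate=2.0, period=100.0, rng=rng) for _ in range(200)]
print(sum(counts) / len(counts))  # ~50, i.e. period / rate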
import fnmatch
def findmatch(members, classprefix):
    """Find match for class member."""
    lst = [n for (n, c) in members]
    return fnmatch.filter(lst, classprefix) | 05038eb4796161f4cc64674248473c01fd4b13aa | 14,127 |
def is_narcissistic(number):
"""Must return True if number is narcissistic"""
return sum([pow(int(x), len(str(number))) for x in str(number)]) == number | b94486d4df52b7108a1c431286e7e86c799abf58 | 14,128 |
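A doctest-style check of is_narcissistic: 153 = 1^3 + 5^3 + 3^3 and 9474 = 9^4 + 4^4 + 7^4 + 4^4:

print(is_narcissistic(153))   # True
print(is_narcissistic(9474))  # True
print(is_narcissistic(154))   # False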
import matplotlib.pyplot as plt
def Plot1DFields(r, h, phi_n_bar, g_s, g_b):
    """
    Generates a nice plot of the 1D fields with 2 axes and a legend.
    Note: The sizing works well in a jupyter notebook
    but probably should be adjusted for a paper.
    """
    fig, ax1 = plt.subplots(figsize=(6.7, 4))
    fig.subplots_adjust(right=0.8)
    ax2 = ax1.twinx()
    p1, = ax1.plot(r, h, 'C0-', label=r'$h$')
    p2, = ax2.plot(r, phi_n_bar, 'C1-', label=r'$\bar{\phi}_n$')
    p3, = ax2.plot(r, g_s, 'C2-', label=r'$g_s$')
    p4, = ax2.plot(r, g_b, 'C3-', label=r'$g_b$')
    ax1.set_xlabel(r'$r$', labelpad=0)
    ax1.set_ylabel(r'$h$', rotation=0, labelpad=10)
    ax1.set_xlim(r[0], r[-1])
    ax2.set_ylabel('$\\bar{\\phi}_n$\n$g_s$\n$g_b$', rotation=0, labelpad=12, va='center')
    ax2.set_ylim(-0.05, 1.05)
    lines = [p1, p2, p3, p4]
    ax1.legend(lines, [l.get_label() for l in lines], loc='center left', bbox_to_anchor=(1.16, 0.54))
    return fig, [ax1, ax2] | 3e48dc6745e49ca36a2d9d1ade8b684ac24c3c25 | 14,129 |
def get_yesterday():
    """
    :return: yesterday's date
    """
    return _get_passed_one_day_from_now(days=1).date() | 99201bd9cde9fdf442d17a6f1c285523e3b867cc | 14,130 |
def classroom_mc():
    """
    Corresponds to the 2nd line of Table 4 in https://doi.org/10.1101/2021.10.14.21264988
    """
    concentration_mc = mc.ConcentrationModel(
        room=models.Room(volume=160, inside_temp=models.PiecewiseConstant((0., 24.), (293,)), humidity=0.3),
        ventilation=models.MultipleVentilation(
            ventilations=(
                models.SlidingWindow(
                    active=models.PeriodicInterval(period=120, duration=120),
                    outside_temp=TorontoTemperatures['Dec'],
                    window_height=1.6,
                    opening_length=0.2,
                ),
                models.AirChange(active=models.PeriodicInterval(period=120, duration=120), air_exch=0.25),
            )
        ),
        infected=mc.InfectedPopulation(
            number=1,
            presence=models.SpecificInterval(((0, 2), (2.5, 4), (5, 7), (7.5, 9))),
            virus=virus_distributions['SARS_CoV_2_ALPHA'],
            mask=models.Mask.types["No mask"],
            activity=activity_distributions['Light activity'],
            expiration=build_expiration('Speaking'),
            host_immunity=0.,
        ),
        evaporation_factor=0.3,
    )
    return mc.ExposureModel(
        concentration_model=concentration_mc,
        short_range=(),
        exposed=mc.Population(
            number=19,
            presence=models.SpecificInterval(((0, 2), (2.5, 4), (5, 7), (7.5, 9))),
            activity=activity_distributions['Seated'],
            mask=models.Mask.types["No mask"],
            host_immunity=0.,
        ),
    ) | c272bc1de9b5b76eb55aa5b8d6dfbe42d2c95e66 | 14,131 |
import linecache
import ast
def smart_eval(stmt, _globals, _locals, filename=None, *, ast_transformer=None):
    """
    Automatically exec/eval stmt.
    Returns the result if eval, or NoResult if it was an exec. Or raises if
    the stmt is a syntax error or raises an exception. If stmt is multiple
    statements ending in an expression, the statements are exec-ed and the
    final expression is eval-ed and returned as the result.
    filename should be the filename used for compiling the statement. If
    given, stmt will be saved to the Python linecache, so that it appears in
    tracebacks. Otherwise, a default filename is used and it isn't saved to the
    linecache. To work properly, "fake" filenames should start with < and end
    with >, and be unique for each stmt.
    Note that classes defined with this will have their module set to
    '__main__'. To change this, set _globals['__name__'] to the desired
    module.
    To transform the ast before compiling it, pass in an ast_transformer
    function. It should take in an ast and return a new ast.
    Examples:
    >>> g = l = {}
    >>> smart_eval('1 + 1', g, l)
    2
    >>> smart_eval('a = 1 + 1', g, l)
    <class 'mypython.mypython.NoResult'>
    >>> g['a']
    2
    >>> smart_eval('a = 1 + 1; a', g, l)
    2
    """
    if filename:
        if filename != "<stdin>":
            # (size, mtime, lines, fullname)
            linecache.cache[filename] = (len(stmt), None, stmt.splitlines(keepends=True), filename)
    else:
        filename = mypython_file()
    p = ast.parse(stmt)
    if ast_transformer:
        p = ast_transformer(p)
    expr = None
    res = NoResult
    if p.body and isinstance(p.body[-1], ast.Expr):
        expr = p.body.pop()
    code = compile(p, filename, 'exec')
    exec(code, _globals, _locals)
    if expr:
        code = compile(ast.Expression(expr.value), filename, 'eval')
        res = eval(code, _globals, _locals)
    return res | d314e1a2f5536304f302ca7c79875a894275b171 | 14,132 |
def cmip_recipe_basics(func):
    """A decorator for starting a cmip recipe
    """
    def parse_and_run(*args, **kwargs):
        set_verbose(_logger, kwargs.get('verbose'))
        opts = parse_recipe_options(kwargs.get('options'), add_cmip_collection_args_to_parser)
        # Recipe is run.
        returnval = func(*args, **kwargs)
        return returnval
    return parse_and_run | 3411b68180d878802379a413524f9a3db185a654 | 14,135 |
def serialize_cupcake(cupcake):
    """Serialize a cupcake SQLAlchemy obj to dictionary."""
    return {
        "id": cupcake.id,
        "flavor": cupcake.flavor,
        "size": cupcake.size,
        "rating": cupcake.rating,
        "image": cupcake.image,
    } | 35fa140cf8b6527984002e28be1f102ee6c71a1b | 14,137 |
import tensorflow as tf
def compute_accuracy(labels, logits):
    """Compute accuracy for a single batch of data, given the precomputed logits
    and expected labels. The returned accuracy is normalized by the batch size.
    """
    current_batch_size = tf.cast(labels.shape[0], tf.float32)
    # logits is the percent chance; this gives the category for each.
    predictions = tf.argmax(logits, axis=1)
    # return the average number of items equal to their label.
    return tf.reduce_sum(tf.cast(tf.equal(labels, predictions),
                                 tf.float32)) / current_batch_size | 2e53fc01053a5caafa2cdd976715dd31d3d43b0f | 14,138 |
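A minimal eager-mode sketch (assuming TensorFlow 2.x) exercising compute_accuracy; labels are integer class ids and logits hold one row of scores per example:

labels = tf.constant([1, 0, 2], dtype=tf.int64)
logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1],
                      [0.0, 0.1, 3.0]])
print(compute_accuracy(labels, logits).numpy())  # 1.0 (all three predictions correct)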
import torch
def get_data(generic_iterator):
    """Code to get minibatch from data iterator
    Inputs:
    - generic_iterator; iterator for dataset
    Outputs:
    - data; minibatch of data from iterator
    """
    data = next(generic_iterator)
    if torch.cuda.is_available():
        data = data.cuda()
    return data | 364151694fb452279691986f5533e182a8b905f3 | 14,139 |
from torchvision import transforms as T
def aug_transform(crop, base_transform, cfg, extra_t=[]):
    """ augmentation transform generated from config """
    return T.Compose(
        [
            T.RandomApply(
                [T.ColorJitter(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)], p=cfg.cj_p
            ),
            T.RandomGrayscale(p=cfg.gs_p),
            T.RandomResizedCrop(
                crop,
                scale=(cfg.crop_s0, cfg.crop_s1),
                ratio=(cfg.crop_r0, cfg.crop_r1),
                interpolation=3,
            ),
            T.RandomHorizontalFlip(p=cfg.hf_p),
            *extra_t,
            base_transform(),
        ]
    ) | 4d8ac62e4ad550f563d9adb237db8853a0c7d36a | 14,140 |
import numpy as np
def _check_definition_contains_or(definition_dict, key, values):
    """Returns True if definition_dict[key] contains any of the given values."""
    out = False
    for value in values:
        if (np.array(list(definition_dict[key])) == value).any():
            out = True
            break
    return out | bb15bdbe50476ea46425be20e0c35229352ba03f | 14,141 |
from threading import Thread
def concurrent_map(func, data):
    """
    Similar to the builtin function map(). But spawn a thread for each argument
    and apply `func` concurrently.
    Note: unlike map(), we cannot take an iterable argument. `data` should be an
    indexable sequence.
    WARNING: this function doesn't limit the number of threads at the same time
    """
    N = len(data)
    result = [None] * N
    # wrapper to dispose the result in the right slot
    def task_wrapper(i):
        result[i] = func(data[i])
    threads = [Thread(target=task_wrapper, args=(i,)) for i in range(N)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return result | d88c66af120f9a4408bf6e1d61c08f2fdcf81acd | 14,142 |
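A usage sketch for concurrent_map; threads pay off here because time.sleep releases the GIL, so the calls overlap instead of running serially:

import time
def slow_double(x):
    time.sleep(0.1)  # stand-in for I/O-bound work
    return 2 * x
print(concurrent_map(slow_double, [1, 2, 3]))  # [2, 4, 6] in ~0.1 s rather than ~0.3 s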
import matplotlib.pyplot as plt
def star_hexagon(xy, radius=5, **kwargs):
    """
      |\
    c | \ b
      |__\
        a
    """
    x, y = xy
    r = radius
    a = 1/4*r
    b = a*2
    c = a*3**(1/2)
    return plt.Polygon(xy=(
        (x, y-2*c), (x+a, y-c), (x+a+b, y-c),
        (x+b, y), (x+a+b, y+c), (x+a, y+c),
        (x, y+2*c), (x-a, y+c), (x-a-b, y+c),
        (x-b, y), (x-a-b, y-c), (x-a, y-c),
    ), closed=True, **kwargs) | 62f09f26e98723764d03a678634cbb00f051e105 | 14,143 |
def calibrate(leveled_arcs, sat_biases, stn_biases):
    """
    Apply satellite and station biases (converted from ns to TECU) to the
    leveled phase and pseudorange observables, returning calibrated arcs.
    """
    calibrated_arcs = []
    for arc in leveled_arcs:
        if arc.sat[0] == 'G':
            sat_bias = sat_biases['GPS'][int(arc.sat[1:])][0] * NS_TO_TECU
            stn_bias = stn_biases['GPS'][arc.stn.upper()][0] * NS_TO_TECU
        elif arc.sat[0] == 'R':
            sat_bias = sat_biases['GLONASS'][int(arc.sat[1:])][0] * NS_TO_TECU
            stn_bias = stn_biases['GLONASS'][arc.stn.upper()][0] * NS_TO_TECU
        else:
            raise ValueError('Satellite bias for {} not found'.format(arc.sat))
        data_map = {'gps_time': arc.gps_time.values,
                    'az': arc.az.values,
                    'el': arc.el.values,
                    'satx': arc.satx.values,
                    'saty': arc.saty.values,
                    'satz': arc.satz.values,
                    'sobs': arc.L_I + sat_bias + stn_bias,
                    'sprn': arc.P_I + sat_bias + stn_bias}
        calibrated_arc = CalibratedArc(data_map)
        calibrated_arc.xyz = arc.xyz
        calibrated_arc.llh = arc.llh
        calibrated_arc.stn = arc.stn
        calibrated_arc.recv_type = arc.recv_type
        calibrated_arc.sat = arc.sat
        calibrated_arc.L = arc.L
        calibrated_arc.L_scatter = arc.L_scatter
        calibrated_arc.sat_bias = sat_bias
        calibrated_arc.stn_bias = stn_bias
        calibrated_arcs.append(calibrated_arc)
    return calibrated_arcs | 63065e0000ebaa48b71d3f9ed9814277b6bf63ed | 14,144 |
import numpy as np
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset, K=10):
    """
    Implements the negative sampling cost function and gradients for word2vec
    :param predicted: ndarray, the predicted (center) word vector(v_c)
    :param target: integer, the index of the target word
    :param outputVectors: 2D ndarray, output word vectors (as rows)
    :param dataset: an interface into the dataset
    :param K: integer, no of negative samples
    :return:
        cost: cost function for negative sampling
        gradPred: gradient with respect to predicted (input / center) word vector
        grad: gradient with respect to output word vectors
    """
    grad = np.zeros(outputVectors.shape)
    gradPred = np.zeros(predicted.shape)
    indices = [target]
    for k in range(K):
        newidx = dataset.sampleTokenIdx()
        while newidx == target:
            newidx = dataset.sampleTokenIdx()
        indices += [newidx]
    labels = np.array([1] + [-1 for k in range(K)]).reshape(-1, 1)
    vecs = outputVectors[indices, :]
    t = sigmoid(vecs.dot(predicted.T) * labels)
    cost = -np.sum(np.log(t))
    delta = labels * (t - 1)
    gradPred = delta.reshape((1, K + 1)).dot(vecs).flatten()
    gradtemp = delta.dot(predicted)
    for k in range(K + 1):
        grad[indices[k]] += gradtemp[k, :]
    return cost, gradPred, grad | 4e1fbf082d97b1a4c7b5b5f9ee722c54fc993712 | 14,145 |
from datetime import datetime
import pytz
def isotime(timestamp):
    """ISO 8601 formatted date in UTC from unix timestamp"""
    return datetime.fromtimestamp(timestamp, pytz.utc).isoformat() | f6a922d75a186e26f158edc585691e31bf430b01 | 14,146 |
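A usage sketch for isotime; the input is a Unix timestamp (seconds since the epoch):

print(isotime(0))           # '1970-01-01T00:00:00+00:00'
print(isotime(1609459200))  # '2021-01-01T00:00:00+00:00'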
import numpy as np
def initializeSeam():
    """
    This function defines the seams of a baseball. It is
    based, to a large extent, on the work from
    http://www.darenscotwilson.com/spec/bbseam/bbseam.html
    """
    n = 109  # number of points we're calculating on the seam line
    alpha = np.linspace(0, np.pi * 2, n)
    x = np.zeros(len(alpha))
    y = np.zeros(len(alpha))
    z = np.zeros(len(alpha))
    R = (2 + 15 / 16.) / 2
    for i in range(len(alpha) - 1):
        x[i] = ((1 / 13) * R * ((9 * np.cos(alpha[i]) - 4 * np.cos(3 * alpha[i]))))
        y[i] = ((1 / 13) * R * ((9 * np.sin(alpha[i]) + 4 * np.sin(3 * alpha[i]))))
        z[i] = ((12 / 13) * R * np.cos(2 * alpha[i]))
    return x, y, z | 43c7e968ecd98595c46e679676f23cbb07d28bb3 | 14,147 |
def check_model_consistency(model, grounding_dict, pos_labels):
    """Check that serialized model is consistent with associated json files.
    """
    groundings = {grounding for grounding_map in grounding_dict.values()
                  for grounding in grounding_map.values()}
    model_labels = set(model.estimator.named_steps['logit'].classes_)
    consistent_labels = groundings <= model_labels
    shortforms = set(grounding_dict.keys())
    model_shortforms = set(model.shortforms)
    consistent_shortforms = shortforms == model_shortforms
    consistent_pos_labels = set(pos_labels) <= model_labels
    return consistent_labels and consistent_shortforms and \
        consistent_pos_labels | b5d1beda0be5ceccec158839c61c1d79349596ef | 14,148 |
import datetime as dt
def get_submission_info(tile_grid, collections, tile_indices,
                        period_start, period_end, period_freq):
    """ Return information about tracked order submissions
    """
    return {
        'submitted': dt.datetime.today().isoformat(),
        'collections': collections,
        'tile_grid': tile_grid.to_dict(),
        'tile_indices': list(tile_indices),
        'period_start': period_start.isoformat(),
        'period_end': period_end.isoformat(),
        'period_freq': period_freq
    } | 990740ef15760fd5514598772d496db47a436786 | 14,149 |
def load_obj(path):
    """Load an object from a Python file.
    path is relative to the data dir. The file is executed and the obj
    local is returned.
    """
    localdict = {}
    with open(_DATADIR / path) as file:
        exec(file.read(), localdict, localdict)
    return localdict['obj'] | 8c44141e58d0aa1402f6d5244857fbe3d07ddc84 | 14,150 |
import numpy as np
def dp_policy_evaluation(env, pi, v=None, gamma=1, tol=1e-3, iter_max=100,
                         verbose=True):
    """Evaluates state-value function by performing iterative policy evaluation
    via Bellman expectation equation (in-place)
    Based on Sutton/Barto, Reinforcement Learning, 2nd ed. p. 75
    Args:
        env: Environment
        pi: Policy
        v: Initial value function or None
        gamma: Discount factor
        tol: Tolerance to stop iteration
        iter_max: Maximum iteration count
    Returns:
        v: State-value function
    """
    if v is None:
        v = np.zeros(env.observation_space.n)
    for i_iter in range(iter_max):
        if verbose:
            print("\r> DP Policy evaluation: Iteration {}/{}".format(
                i_iter + 1, iter_max), end="")
        delta = 0
        for state in range(env.observation_space.n):
            v_new = 0
            for action in range(env.action_space.n):
                for (prob, state2, reward, done) in env.P[state][action]:
                    v_new += pi[state][action] * prob * (
                        reward + gamma * v[state2]
                    )
            delta = max(delta, np.abs(v_new - v[state]))
            v[state] = v_new
        if delta < tol:
            break
    if verbose:
        print()
    return v | e920f48f6b37f9815b077b18e02b0403b78f2ce7 | 14,151 |
def gpst2utc(tgps, leaps_=-18):
    """ calculate UTC-time from gps-time """
    tutc = timeadd(tgps, leaps_)
    return tutc | a1bf6aa583ae1827cce572f809831b3396bdd91b | 14,152 |
def create_shell(username, session_id, key):
    """Instantiates a CapturingSocket and SwiftShell and hooks them up.
    After you call this, the returned CapturingSocket should capture all
    IPython display messages.
    """
    socket = CapturingSocket()
    session = Session(username=username, session=session_id, key=key)
    shell = SwiftShell.instance()
    shell.display_pub.session = session
    shell.display_pub.pub_socket = socket
    return [socket, shell] | 13af90ea2497211c75d66fbff334ee95ede678b8 | 14,153 |
def _get_index_sort_str(env, name):
    """
    Returns a string by which an object with the given name shall be sorted in
    indices.
    """
    ignored_prefixes = env.config.cmake_index_common_prefix
    for prefix in ignored_prefixes:
        if name.startswith(prefix) and name != prefix:
            return name[len(prefix):]
    return name | cdf7a509ef8f49ff15cac779e37f0bc5ab98c613 | 14,154 |
import datetime
def utcnow():
    """Gets current time.
    :returns: current time from utc
    :rtype: :py:obj:`datetime.datetime`
    """
    return datetime.datetime.utcnow() | a85b4e28b0cbc087f3c0bb641e896958ea267c3f | 14,155 |
def elem2full(elem: str) -> str:
    """Retrieves full element name for short element name."""
    for element_name, element_ids, element_short in PERIODIC_TABLE:
        if elem == element_short:
            print(element_name)
            return element_name
    else:
        raise ValueError(f"Index {elem} does not match any element.") | 2c78531dc21722cc504182abec469eabdfeec862 | 14,156 |
from random import SystemRandom
def create_random_totp_secret(secret_length: int = 72) -> bytes:
    """
    Generate a random TOTP secret
    :param int secret_length: How long should the secret be?
    :rtype: bytes
    :returns: A random secret
    """
    random = SystemRandom()
    return bytes(random.getrandbits(8) for _ in range(secret_length)) | 6ecaf035212e5e4e2d8e71856c05ea15407fdb19 | 14,158 |
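A usage sketch for create_random_totp_secret; SystemRandom draws from the OS entropy pool, so the bytes differ on every call:

secret = create_random_totp_secret(16)
print(len(secret))   # 16
print(type(secret))  # <class 'bytes'>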
def _get_roles_can_update(community_id):
    """Get the full list of roles that current identity can update."""
    return _filter_roles("members_update", {"user", "group"}, community_id) | 7701cc425a83212dbd5ffd039a629b06a17fcb83 | 14,159 |
import tvm
def register_external_compiler(op_name, fexternal=None, level=10):
    """Register the external compiler for an op.
    Parameters
    ----------
    op_name : str
        The name of the operator.
    fexternal : function (attrs: Attrs, args: List[Expr], compiler: str)
        -> new_expr: Expr
        The function for wrapping a call expr with compiler_begin and
        compiler_end.
    level : int
        The priority level
    """
    return tvm.ir.register_op_attr(op_name, "FTVMExternalCompiler", fexternal, level) | 0d41fce383407af3d8a60d1886950424b89ee18b | 14,160 |
import tensorflow as tf
def kl_divergence_from_logits_bm(logits_a, logits_b):
    """Gets KL divergence from logits parameterizing categorical distributions.
    Args:
        logits_a: A tensor of logits parameterizing the first distribution.
        logits_b: A tensor of logits parameterizing the second distribution.
    Returns:
        The (batch_size,) shaped tensor of KL divergences.
    """
    beta_coeff = 1
    alphas = tf.exp(logits_a)
    betas = tf.exp(logits_b)
    a_zero = tf.reduce_sum(alphas, -1)
    loss1 = tf.lgamma(a_zero) - tf.reduce_sum(tf.lgamma(alphas), -1)
    loss2 = tf.reduce_sum(
        (alphas - betas) * (tf.digamma(alphas) - tf.digamma(tf.expand_dims(a_zero, -1))), -1)
    kl_loss = loss1 + loss2
    return kl_loss | 8078fcbd4c4c58bed888ba5b45e99783799bde42 | 14,161 |
import logging
def if_stopped_or_playing(speaker, action, args, soco_function, use_local_speaker_list):
    """Perform the action only if the speaker is currently in the desired playback state"""
    state = speaker.get_current_transport_info()["current_transport_state"]
    logging.info(
        "Condition: '{}': Speaker '{}' is in state '{}'".format(
            action, speaker.player_name, state
        )
    )
    if (state != "PLAYING" and action == "if_playing") or (
        state == "PLAYING" and action == "if_stopped"
    ):
        logging.info("Action suppressed")
        return True
    action = args[0]
    args = args[1:]
    logging.info(
        "Action invoked: '{} {} {}'".format(speaker.player_name, action, " ".join(args))
    )
    return process_action(
        speaker, action, args, use_local_speaker_list=use_local_speaker_list
    ) | 7daa5bd040e6753ce1e39807071e0911a8dd3182 | 14,162 |
def compute_src_graph(hive_holder, common_table):
    """ computes just the src part of the full version graph.
    Side effect: updates requirements of blocks to actually point to real dep versions
    """
    graph = BlockVersionGraph()
    versions = hive_holder.versions
    graph.add_nodes(versions.values())
    references = References()
    for block_holder in hive_holder.block_holders:
        dep_table = block_holder.requirements
        base_version = versions[block_holder.block_name]
        for target_bcn in block_holder.external_targets():
            target_block_name = target_bcn.block_name
            if target_block_name in versions:
                other_version = versions[target_block_name]
            else:
                other_version = common_table[target_block_name]
            references[other_version].add(target_bcn.cell_name)
            graph.add_edge(base_version, other_version)
            dep_table.add_version(other_version)
    return graph, references | 55d150e583e93c0d5b25490543738f79ba23fe64 | 14,163 |
import numpy as np
def get_uv(seed=0, nrm=False, vector=False):
    """Dataset with random univariate data
    Parameters
    ----------
    seed : None | int
        Seed the numpy random state before generating random data.
    nrm : bool
        Add a nested random-effects variable (default False).
    vector : bool
        Add a 3d vector variable as ``ds['v']`` (default ``False``).
    """
    if seed is not None:
        np.random.seed(seed)
    ds = permute([('A', ('a1', 'a2')),
                  ('B', ('b1', 'b2')),
                  ('rm', ['s%03i' % i for i in range(20)])])
    ds['rm'].random = True
    ds['intvar'] = Var(np.random.randint(5, 15, 80))
    ds['intvar'][:20] += 3
    ds['fltvar'] = Var(np.random.normal(0, 1, 80))
    ds['fltvar'][:40] += 1.
    ds['fltvar2'] = Var(np.random.normal(0, 1, 80))
    ds['fltvar2'][40:] += ds['fltvar'][40:].x
    ds['index'] = Var(np.repeat([True, False], 40))
    if nrm:
        ds['nrm'] = Factor(['s%03i' % i for i in range(40)], tile=2, random=True)
    if vector:
        x = np.random.normal(0, 1, (80, 3))
        x[:40] += [.3, .3, .3]
        ds['v'] = NDVar(x, (Case, Space('RAS')))
    return ds | 1394ae09705aa01f9309399ab8f1b7fcff04e010 | 14,164 |
from typing import Iterable
def private_names_for(cls, names):
    """
    Returns:
        Iterable of private names using private_name_for()"""
    if not isinstance(names, Iterable):
        raise TypeError('names must be an iterable')
return (private_name_for(item, cls) for item in names) | 606afdcfd8eed1e288df71a79f50a37037d84139 | 14,165 |
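
# private_name_for is not shown in this entry; a plausible sketch mirroring
# CPython's class-private name mangling (an assumption about the intended scheme).
def private_name_for(name, cls):
    return '_{}__{}'.format(cls.__name__.lstrip('_'), name)

class Widget:
    pass

print(list(private_names_for(Widget, ['color', 'size'])))
# ['_Widget__color', '_Widget__size']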
import numpy as np
import scipy.linalg as la

def invert_trimat(A, lower=False, right_inv=False, return_logdet=False, return_inv=False):
"""Inversion of triangular matrices.
Returns lambda function f that multiplies the inverse of A times a vector.
Args:
A: Triangular matrix.
lower: if True A is lower triangular, else A is upper triangular.
right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
return_logdet: If True, it also returns the log determinant of A.
return_inv: If True, it also returns A^{-1}
Returns:
Lambda function that multiplies A^{-1} times vector.
Log determinant of A
A^{-1}
"""
    if right_inv:
        fh = lambda x: la.solve_triangular(A.T, x.T, lower=not lower).T
    else:
        fh = lambda x: la.solve_triangular(A, x, lower=lower)
    if return_logdet or return_inv:
        r = [fh]
    else:
        r = fh
    if return_logdet:
        logdet = np.sum(np.log(np.diag(A)))
        r.append(logdet)
    if return_inv:
        invA = fh(np.eye(A.shape[0]))
        r.append(invA)
    return r | ad449fe1718136e64a6896e74fbb8ee7a3cefcec | 14,167
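
# Sanity check for invert_trimat against dense NumPy results — a hedged demo
# using a random lower-triangular matrix with a positive diagonal, which the
# log-determinant shortcut assumes.
import numpy as np

rng = np.random.default_rng(0)
A = np.tril(rng.normal(size=(4, 4))) + 4.0 * np.eye(4)
f, logdet, invA = invert_trimat(A, lower=True, return_logdet=True, return_inv=True)
v = rng.normal(size=4)
assert np.allclose(f(v), np.linalg.solve(A, v))
assert np.isclose(logdet, np.log(np.linalg.det(A)))
assert np.allclose(invA, np.linalg.inv(A))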
def category_input_field_delete(request, structure_slug,
category_slug, module_id,
field_id, structure):
"""
Deletes a field from a category input module
:type structure_slug: String
:type category_slug: String
:type module_id: Integer
:type field_id: Integer
:type structure: OrganizationalStructure (from @is_manager)
:param structure_slug: structure slug
:param category_slug: category slug
:param module_id: input module id
:param field_id: module field id
:param structure: structure object (from @is_manager)
:return: redirect
"""
category = get_object_or_404(TicketCategory,
organizational_structure=structure,
slug=category_slug)
module = get_object_or_404(TicketCategoryModule,
pk=module_id,
ticket_category=category)
if not module.can_be_deleted():
# log action
logger.error('[{}] manager of structure {}'
' {} tried to delete a field'
' from module {} of category {}'.format(timezone.localtime(),
structure,
request.user,
module,
category))
        messages.add_message(request, messages.ERROR,
                             _("Cannot delete module {}."
                               " There are linked requests").format(module))
else:
field = get_object_or_404(TicketCategoryInputList,
pk=field_id,
category_module=module)
# log action
logger.info('[{}] manager of structure {}'
' {} deleted the field {}'
' from module {} of category {}'.format(timezone.localtime(),
structure,
request.user,
field,
module,
category))
field.delete()
        messages.add_message(request, messages.SUCCESS,
                             _("Field {} deleted successfully").format(field.name))
return redirect('uni_ticket:manager_category_input_module',
structure_slug=structure_slug,
category_slug=category_slug,
module_id=module_id) | f5319ea5b992529e7b8d484b1dc6c0be621f9955 | 14,168 |
import pandas as pd

def cat_to_num(att_df):
"""
Changes categorical variables in a dataframe to numerical
"""
att_df_encode = att_df.copy(deep=True)
for att in att_df_encode.columns:
if att_df_encode[att].dtype != float:
att_df_encode[att] = pd.Categorical(att_df_encode[att])
att_df_encode[att] = att_df_encode[att].cat.codes
return att_df_encode | dbd57022ddd99d8ea4936da45d8c2cbe09078b81 | 14,169 |
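
# Small demonstration of cat_to_num on a hypothetical attribute frame: string
# columns are integer-encoded, float columns pass through unchanged.
import pandas as pd

df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'weight': [1.0, 2.5, 3.0]})
print(cat_to_num(df))
#    color  weight
# 0      1     1.0
# 1      0     2.5
# 2      1     3.0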
from aiohttp import web

async def handle_get(request):
"""Handle GET request, can be display at http://localhost:8080"""
text = (f'Server is running at {request.url}.\n'
f'Try `curl -X POST --data "text=test" {request.url}example`\n')
return web.Response(text=text) | 2398870ab6479d8db3517b89e0177cab674156b0 | 14,170 |
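
# Minimal wiring to serve the handler above — a hedged sketch; the POST
# endpoint mentioned in the response text is not part of this entry.
app = web.Application()
app.add_routes([web.get('/', handle_get)])

if __name__ == '__main__':
    web.run_app(app, port=8080)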
from torch import full
from torch.autograd import Variable

def values_target(size: tuple, value: float, cuda: bool = False) -> Variable:
    """ returns a Variable of the given size filled with `value` """
    result = Variable(full(size=size, fill_value=value))
if cuda:
result = result.cuda()
return result | be9db2c08fbac00e1f8d10b859da8422e7331901 | 14,171 |
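
# Typical GAN-style usage of values_target: a batch of "real" labels, with the
# cuda flag guarded on availability (a hedged example, not from the source).
import torch

real_targets = values_target(size=(16, 1), value=1.0, cuda=torch.cuda.is_available())
print(real_targets.shape)  # torch.Size([16, 1])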
import numpy as np

def get_new_perpendicular_point_with_custom_distance_to_every_line_segment(
    line_segments: np.ndarray, distance_from_the_line: np.ndarray
):
    """
    :param line_segments: array of shape [number_of_line_segments, 2, 2]
    :param distance_from_the_line: how far from the reference segment to place each new point
    :return: the new perpendicular point for every line segment
    """
    # new_perpendicular_point_to_line_segment is defined elsewhere in this module.
    return new_perpendicular_point_to_line_segment(
        line_segments, distance_from_the_line
    ) | 3ebb94b9fa4b7f28e655a3e9a4fe93ec40276dff | 14,172
import requests
def tmdb_find_movie(movie: str, tmdb_api_token: str):
"""
Search the tmdb api for movies by title
Args:
movie (str): the title of a movie
tmdb_api_token (str): your tmdb v3 api token
Returns:
dict
"""
url = 'https://api.themoviedb.org/3/search/movie?'
params = {'query': movie, 'language': 'en-US', 'api_key': tmdb_api_token, }
return requests.get(url, params).json() | ea676fbb91f451b20ce4cd2f7258240ace3925b3 | 14,173 |
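
# Example lookup — 'YOUR_TMDB_V3_KEY' is a placeholder; a real TMDB v3 API key
# is required for the request to succeed.
result = tmdb_find_movie('Blade Runner', tmdb_api_token='YOUR_TMDB_V3_KEY')
for hit in result.get('results', [])[:3]:
    print(hit['title'], hit.get('release_date', ''))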
def is_missing_artifact_error(err: WandbError):
"""
Check if a specific W&B error is caused by a 404 on the artifact we're looking for.
"""
# This is brittle, but at least we have a test for it.
return "does not contain artifact" in err.message | 023bdab0b3a2914272a1087a5c42ba81ec064548 | 14,174 |
import pandas as pd
import xarray as xr

def create_reforecast_valid_times(start_year=2000):
    """Inits from year 2000 to 2019 for the same days as in 2020.

    reforecast_end_year, leads and the create_* helpers used below are
    module-level definitions from the same file.
    """
reforecasts_inits = []
inits_2020 = create_forecast_valid_times().forecast_time.to_index()
for year in range(start_year, reforecast_end_year + 1):
# dates_year = pd.date_range(start=f"{year}-01-02", end=f"{year}-12-31", freq="7D")
dates_year = pd.DatetimeIndex([i.strftime("%Y%m%d").replace("2020", str(year)) for i in inits_2020])
dates_year = xr.DataArray(
dates_year,
dims="forecast_time",
coords={"forecast_time": dates_year},
)
reforecasts_inits.append(dates_year)
reforecasts_inits = xr.concat(reforecasts_inits, dim="forecast_time")
reforecast_valid_times = create_valid_time_from_forecast_time_and_lead_time(reforecasts_inits, leads)
reforecast_valid_times = (
reforecast_valid_times.rename("test").assign_coords(valid_time=reforecast_valid_times).to_dataset()
)
reforecast_valid_times = xr.ones_like(reforecast_valid_times).astype("float32")
return reforecast_valid_times | 0ea34549aef8b8b4551534560ab29c9580f9f1ca | 14,175 |
def _checkerror(fulloutput):
"""
Function to check the full output for known strings and plausible fixes to the error.
Future: add items to `edict` where the key is a unique string contained in the offending
    output, and the data is the recommended solution to resolve the problem
"""
edict = {'multiply': ('NOTE: you might(?) need to clean the `tmp/` folder!'),
'already defined': ('NOTE: you probably (might?) need to clean the `tmp/` folder!'),
             'unresolved externals': ('NOTE: consider recompiling the linked libraries to '
                                      'have the correct name mangling for cl.exe: '
                                      'ifort: /names:lowercase /assume:underscore '),
"KeyError: 'void'": ('There may be an issue with public/private function '
'definitions or a missing variable definition in the last '
'function listed above. For the first error consider using '
'the parameter `functiondict` or checking to ensure all '
'module functions are public... For the second error, check '
'that all of the parameters in the subroutine are defined'),
"No such file or directory": ('There may be a space in the path to one of the '
'source code or library folders'),
"LINK : fatal error LNK1104: cannot open file": ('The pyd is currently in use, '
'restart any kernels using it !')
}
# iterate through the keys in the error dictionary and see if the key is in the full output
extramessage = ''
for error_key in edict.keys():
if error_key in fulloutput:
extramessage = edict[error_key]
return extramessage | 5312beff6f998d197a3822e04e60d47716520f50 | 14,176 |
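
# Feeding a captured build log through the checker — the log text below is
# hypothetical.
log = 'foo.obj : error LNK2019: ... 2 unresolved externals'
print(_checkerror(log))  # prints the recompile/name-mangling hint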
def create_pre_process_block(net, ref_layer_name, means, scales=None):
"""
Generates the pre-process block for the IR XML
Args:
net: root XML element
ref_layer_name: name of the layer where it is referenced to
means: tuple of values
scales: tuple of values
Returns:
pre-process XML element
"""
pre_process = SubElement(net, 'pre-process')
pre_process.set('reference-layer-name', ref_layer_name)
for idx in range(len(means)):
channel_xml = SubElement(pre_process, 'channel')
channel_xml.set('id', str(idx))
mean_xml = SubElement(channel_xml, 'mean')
mean_xml.set('value', str(means[idx]))
if scales:
scale_xml = SubElement(channel_xml, 'scale')
scale_xml.set('value', str(scales[idx]))
return pre_process | 54013ec9d06cf7eff9b0af18d1655a5455a894be | 14,177 |
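
# Building a three-channel mean/scale block under a fresh root element — the
# layer name and channel values are illustrative.
from xml.etree.ElementTree import Element, tostring

net = Element('net')
create_pre_process_block(net, 'data', means=(104.0, 117.0, 123.0),
                         scales=(1.0, 1.0, 1.0))
print(tostring(net).decode())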
def GetSystemFaultsFromState(state, spot_wrapper):
"""Maps system fault data from robot state proto to ROS SystemFaultState message
Args:
        state: Robot State proto
spot_wrapper: A SpotWrapper object
Returns:
SystemFaultState message
"""
system_fault_state_msg = SystemFaultState()
system_fault_state_msg.faults = getSystemFaults(state.system_fault_state.faults, spot_wrapper)
system_fault_state_msg.historical_faults = getSystemFaults(state.system_fault_state.historical_faults, spot_wrapper)
return system_fault_state_msg | cda2d0bbe3ee3ca02724828d9f0f882695c3e0b0 | 14,178 |
def findAnEven(L):
    """
    Assumes L is a list of integers.
    Returns the first even number in L.
    Raises ValueError if L does not contain an even number.
    """
    for num in L:
        if num % 2 == 0:
            return num
    raise ValueError('L does not contain an even number') | 93f7854bd376d52df40b23d21bfde784db124106 | 14,179
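
# Example behaviour of findAnEven, including the error path.
print(findAnEven([3, 5, 8, 10]))  # 8
try:
    findAnEven([1, 3, 5])
except ValueError as err:
    print('no even number found:', err)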
def get_points(wire):
"""
    get all points (including the starting point) where the wire bends
>>> get_points(["R75","D30","R83","U83","L12","D49","R71","U7","L72"])
[((0, 0), (75, 0)), ((75, 0), (75, -30)), ((75, -30), (158, -30)), ((158, -30), (158, 53)), ((158, 53), (146, 53)), ((146, 53), (146, 4)), ((146, 4), (217, 4)), ((217, 4), (217, 11)), ((217, 11), (145, 11))]
>>> get_points(["U62","R66","U55","R34","D71","R55","D58","R83"])
[((0, 0), (0, 62)), ((0, 62), (66, 62)), ((66, 62), (66, 117)), ((66, 117), (100, 117)), ((100, 117), (100, 46)), ((100, 46), (155, 46)), ((155, 46), (155, -12)), ((155, -12), (238, -12))]
>>> get_points(["R98","U47","R26","D63","R33","U87","L62","D20","R33","U53","R51"])
[((0, 0), (98, 0)), ((98, 0), (98, 47)), ((98, 47), (124, 47)), ((124, 47), (124, -16)), ((124, -16), (157, -16)), ((157, -16), (157, 71)), ((157, 71), (95, 71)), ((95, 71), (95, 51)), ((95, 51), (128, 51)), ((128, 51), (128, 104)), ((128, 104), (179, 104))]
>>> get_points(["U98","R91","D20","R16","D67","R40","U7","R15","U6","R7"])
[((0, 0), (0, 98)), ((0, 98), (91, 98)), ((91, 98), (91, 78)), ((91, 78), (107, 78)), ((107, 78), (107, 11)), ((107, 11), (147, 11)), ((147, 11), (147, 18)), ((147, 18), (162, 18)), ((162, 18), (162, 24)), ((162, 24), (169, 24))]
"""
starting_point = (0, 0)
result = []
for part in wire:
end_point = get_end_point(starting_point, part)
result.append((starting_point, end_point))
starting_point = end_point
return result | 8f0e7bad7500b8113d6ce601c6f2af472192774f | 14,180 |
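
# get_end_point is assumed by this entry; a sketch consistent with the
# doctests above (R/L move along x, U/D along y, with D negative).
def get_end_point(start, part):
    # part is a direction letter plus a distance, e.g. "R75"
    dx, dy = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}[part[0]]
    dist = int(part[1:])
    return (start[0] + dx * dist, start[1] + dy * dist)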
import numpy as np

def getcutscheckerboard(rho):
    """
    :param rho: 2-D density field on a square (n x n) grid
    :return: cell centers and the values along a horizontal, a vertical and a diagonal cut
    """
ny, nx = rho.shape
assert nx == ny
n = ny
horizontal = rho[6 * n // 7, :]
vertical = rho[:, n // 7]
if np.abs(horizontal[0]) < 1e-15:
horizontal = horizontal[2:-2]
if np.abs(vertical[0]) < 1e-15:
vertical = vertical[2:-2]
diag = [rho[i, i] for i in range(n)]
if np.abs(diag[0]) < 1e-15:
diag = diag[2:-2]
edges = np.linspace(0, 7, len(horizontal) + 1)
centers = (edges[1:] + edges[:-1]) / 2
return centers, horizontal, vertical, diag | 31d95160d1b34b50e616a346e04d5b6567886677 | 14,181 |
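
# Demo of getcutscheckerboard on a synthetic square field — any n x n grid works.
import numpy as np

rho = np.random.default_rng(1).random((14, 14))
centers, horizontal, vertical, diag = getcutscheckerboard(rho)
print(centers.shape, len(horizontal), len(vertical), len(diag))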