content | sha1 | id
---|---|---
import os
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str, prefix: str) -> str:
"""
Customize datalist_json_path for each client
Args:
datalist_json_path: default datalist_json_path
client_id: e.g., site-2
prefix: prefix used to build the client-specific datalist file name
"""
# Customize datalist_json_path for each client
# - client_id: e.g. site-5
head, tail = os.path.split(datalist_json_path)
datalist_json_path = os.path.join(
head,
prefix + "_" + str(client_id) + ".json",
)
return datalist_json_path | fb21864a18e105bbfe73cbec75ebcfd4ac52910b | 17,700 |
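A minimal usage sketch for the function above; the path, client id, and prefix values are made up for illustration.
path = custom_client_datalist_json_path("/data/config/datalist.json", "site-2", "config_fl")
print(path)  # /data/config/config_fl_site-2.json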
from typing import List
from xml.etree import ElementTree
from xml.etree.ElementTree import ParseError
def parse_mint_studies_response(xml_raw) -> List[MintStudy]:
"""Parse the xml response to a MINT find DICOM studies call
Raises
------
DICOMTrolleyError
If parsing fails
"""
try:
studies = ElementTree.fromstring(xml_raw).findall(
MintStudy.xml_element
)
except ParseError as e:
raise DICOMTrolleyError(
f"Could not parse server response as MINT "
f"studies. Response was: {xml_raw}"
) from e
return [MintStudy.init_from_element(x) for x in studies] | 465d9156be75144bacd1c84316660ea48a3f276e | 17,701 |
import numpy as np
def lookup_no_interp(x, dx, xi, y, dy, yi):
"""
Return the indices of the closest grid points for a look-up table
(no interpolation: the nearest point in the grid is chosen)
x ... range of x values
xi ... interpolation value on x-axis
dx ... grid width of x (dx = x[1]-x[0])
(same for y)
return: idxX and idxY
"""
if xi > x[0] and xi < x[-1]:
xid = (xi - x[0]) / dx
xid_floor = np.floor(xid)
if xid - xid_floor < dx / 2:
idxX = xid_floor
else:
idxX = xid_floor + 1
elif xi < x[0]:
idxX = 0
else:
idxX = len(x) - 1
if yi > y[0] and yi < y[-1]:
yid = (yi - y[0]) / dy
yid_floor = np.floor(yid)
if yid - yid_floor < dy / 2:
idxY = yid_floor
else:
idxY = yid_floor + 1
elif yi < y[0]:
idxY = 0
else:
idxY = len(y) - 1
return idxX, idxY | cdee658cc50af9ba25902bdbe4274cd49a5c5d89 | 17,702 |
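A small sketch of calling the lookup above, assuming numpy and the function are available; integer-spaced grids are used so the nearest-point rounding matches the docstring.
import numpy as np
x = np.arange(0.0, 11.0)   # dx = 1.0
y = np.arange(0.0, 21.0)   # dy = 1.0
idxX, idxY = lookup_no_interp(x, 1.0, 3.3, y, 1.0, 12.7)
print(idxX, idxY)  # 3.0 13.0 (returned as floats, since np.floor returns floats)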
def advertisement_data_complete_builder(list_of_ad_entries):
"""
Generate a finalized advertisement data value from a list of AD entries that can be passed
to the BLEConnectionManager to set the advertisement data that is sent during advertising.
:param list_of_ad_entries: List of AD entries (can be built using blesuite.utils.gap_utils.advertisement_data_entry_builder)
:type list_of_ad_entries: [str,]
:return: Finalized AD data
:rtype: str
"""
data = ""
for ad in list_of_ad_entries:
length = len(ad)
ad_string = chr(length) + ad
data = data + ad_string
return data | c0f9040c36216cb519706c347d6644405fae0b7f | 17,703 |
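A hypothetical usage sketch for the builder above; the AD entries are hand-written here instead of coming from blesuite's advertisement_data_entry_builder.
flags_entry = "\x01\x06"        # AD type 0x01 (Flags) followed by its value
name_entry = "\x09MyDevice"     # AD type 0x09 (Complete Local Name) followed by the name
adv_data = advertisement_data_complete_builder([flags_entry, name_entry])
print(repr(adv_data))           # '\x02\x01\x06\t\tMyDevice' (each entry is prefixed with its length)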
import tensorflow as tf  # TensorFlow 1.x is assumed (tf.contrib lookup ops are used below)
def process_vocab_table(vocab,
vocab_size,
vocab_threshold,
vocab_lookup,
unk,
pad):
"""process vocab table"""
default_vocab = [unk, pad]
if unk in vocab:
del vocab[unk]
if pad in vocab:
del vocab[pad]
vocab = { k: vocab[k] for k in vocab.keys() if vocab[k] >= vocab_threshold }
if vocab_lookup is not None:
vocab = { k: vocab[k] for k in vocab.keys() if k in vocab_lookup }
sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)
sorted_vocab = default_vocab + sorted_vocab
vocab_table = sorted_vocab[:vocab_size]
vocab_size = len(vocab_table)
vocab_index = tf.contrib.lookup.index_table_from_tensor(
mapping=tf.constant(vocab_table), default_value=0)
vocab_inverted_index = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=tf.constant(vocab_table), default_value=unk)
return vocab_table, vocab_size, vocab_index, vocab_inverted_index | fa4860aac095d531e39008da99d42059e38716ec | 17,704 |
from sensor_msgs.msg import MagneticField
def get_mag_msg(stamp, mag):
"""
Get magnetometer measurement as ROS sensor_msgs::MagneticField
"""
# init:
mag_msg = MagneticField()
# a. set header:
mag_msg.header.stamp = stamp
mag_msg.header.frame_id = '/imu_link'
# b. mag:
(
mag_msg.magnetic_field.x,
mag_msg.magnetic_field.y,
mag_msg.magnetic_field.z
) = mag
# finally:
return mag_msg | ffa661ae168136fcbf626e08f85e19ba356a2e26 | 17,705 |
async def UserMeAPI(
current_user: User = Depends(User.getCurrentUser),
):
"""
Get the information of the currently logged-in user account.<br>
Cannot be accessed unless a JWT-encoded access token is set in the request's Authorization: Bearer header.
"""
# Since this is the most frequently used API, clear any temporary account data left in the twitter_accounts table on each request
## When a Twitter link is cancelled midway, temporary account data is left behind, so remove it here
if await TwitterAccount.filter(icon_url='Temporary').count() > 0:
await TwitterAccount.filter(icon_url='Temporary').delete()
current_user = await User.filter(id=current_user.id).get() # refresh current_user's data
await current_user.fetch_related('twitter_accounts')
return current_user | 0a884c54d1e01b5ae9a31848b081566d35830de6 | 17,706 |
import Rhino.Geometry as rg  # assumes a Rhino/Grasshopper Python environment
def midpt(pt1, pt2):
""" Get the midpoint for two arbitrary points in space. """
return rg.Point3d((pt1[0] + pt2[0])/2, (pt1[1] + pt2[1])/2, (pt1[2] + pt2[2])/2 ) | 324e9fd6fe6ea257a130fcfe51eb73bf0957e57c | 17,707 |
from collections import defaultdict
from astropy.io import fits
import siteUtils
from bot_eo_analyses import make_file_prefix, glob_pattern,\
get_amplifier_gains, bias_filename, dark_current_task,\
plot_ccd_total_noise, get_mask_files
from bot_data_handling import most_common_dark_files
def dark_current_jh_task(det_name):
"""JH version of single sensor execution of the dark current task."""
run = siteUtils.getRunNumber()
file_prefix = make_file_prefix(run, det_name)
acq_jobname = siteUtils.getProcessName('BOT_acq')
dark_files \
= siteUtils.dependency_glob(glob_pattern('dark_current', det_name),
acq_jobname=acq_jobname,
description="Dark current frames:")
if not dark_files:
print("dark_current_task: No dark files found for detector", det_name)
return None
dark_files_linear_fit = list(dark_files)
dark_files = most_common_dark_files(dark_files)
if len(dark_files_linear_fit) == len(dark_files):
# These data only have one integration time, so skip linear
# fit of dark current signal vs integration time.
dark_files_linear_fit = None
mask_files = get_mask_files(det_name)
eotest_results_file \
= siteUtils.dependency_glob('{}_eotest_results.fits'.format(file_prefix),
jobname='read_noise_BOT')[0]
gains = get_amplifier_gains('{}_eotest_results.fits'.format(file_prefix))
bias_frame = bias_filename(run, det_name)
dark_curr_pixels, dark95s \
= dark_current_task(run, det_name, dark_files, gains,
mask_files=mask_files, bias_frame=bias_frame,
dark_files_linear_fit=dark_files_linear_fit)
plot_ccd_total_noise(run, det_name, dark_curr_pixels, dark95s,
eotest_results_file)
return dark_curr_pixels, dark95s | a2d627b21340382018826bb583ad31c8509f9bbe | 17,708 |
import re
def get_sid_list (video_source_filename):
"""This returns a list of subtitle ids in the source video file.
TODO: Also extract ID_SID_nnn_LANG to associate language. Not all DVDs include this.
"""
cmd = "mplayer '%s' -vo null -ao null -frames 0 -identify" % video_source_filename
(command_output, exitstatus) = run(cmd)
idl = re.findall("ID_SUBTITLE_ID=([0-9]+)", command_output)
idl.sort()
return idl | adcbb7e3790c12fd55b17cca2d4ffd87593a78d0 | 17,709 |
def tweetnacl_crypto_box_open(max_messagelength=256):
"""
max_messagelength: maximum length of the message, in bytes.
i.e., the symbolic execution will not consider messages longer than max_messagelength
"""
proj = tweetnaclProject()
state = funcEntryState(proj, "crypto_box_curve25519xsalsa20poly1305_tweet_open", [
("m", pointerToUnconstrainedPublic()), # Output parameter, will hold plaintext, length 'clen'
("c", pointerToUnconstrainedPublic()), # ciphertext: length 'clen'
("clen", publicValue()), # length of ciphertext. Not a pointer
("n", pointerTo(secretArray(24), 24)), # nonce, size crypto_box_NONCEBYTES
("pk", pointerTo(publicArray(32), 32)), # public key, size crypto_box_PUBLICKEYBYTES
("sk", pointerTo(secretArray(32), 32)) # secret key, size crypto_box_SECRETKEYBYTES
])
state.add_constraints(getArgBVS(state, 'clen') <= max_messagelength)
addDevURandom(state)
return (proj, state) | 5b69127c70d3286c2c54b898d541db9f90c1ff51 | 17,710 |
def balanced_parentheses_checker(symbol_string):
"""Verify that a set of parentheses is balanced."""
opening_symbols = '{[('
closing_symbols = '}])'
opening_symbols_stack = data_structures.Stack()
symbol_count = len(symbol_string)
counter = 0
while counter < symbol_count:
current_symbol = symbol_string[counter]
if current_symbol in '{[(':
opening_symbols_stack.push(current_symbol)
else:
if not opening_symbols_stack.is_empty() and \
opening_symbols.index(opening_symbols_stack.peek()) == \
closing_symbols.index(current_symbol):
opening_symbols_stack.pop()
else:
counter = symbol_count
counter += 1
return opening_symbols_stack.is_empty() and counter == symbol_count | 04624d403f5af94c42122258df28363cb8bcf20d | 17,711 |
from functools import wraps
from typing import Optional
def _wrap_outcoming(
store_cls: type, wrapped_method: str, trans_func: Optional[callable] = None
):
"""Output-transforming wrapping of the wrapped_method of store_cls.
The transformation is given by trans_func, which can be a one-argument (trans_func(x))
or a two-argument (trans_func(self, x)) function.
Args:
store_cls: The class that will be transformed
wrapped_method: The method (name) that will be transformed.
trans_func: The transformation function.
wrap_arg_idx: The index of the
Returns: Nothing. It transforms the class in-place
>>> from on.trans import store_wrap
>>> S = store_wrap(dict)
>>> _wrap_outcoming(S, '_key_of_id', lambda x: f'wrapped_{x}')
>>> s = S({'a': 1, 'b': 2})
>>> list(s)
['wrapped_a', 'wrapped_b']
>>> _wrap_outcoming(S, '_key_of_id', lambda self, x: f'wrapped_{x}')
>>> s = S({'a': 1, 'b': 2}); assert list(s) == ['wrapped_a', 'wrapped_b']
>>> class A:
... def __init__(self, prefix='wrapped_'):
... self.prefix = prefix
... def _key_of_id(self, x):
... return self.prefix + x
>>> _wrap_outcoming(S, '_key_of_id', A(prefix='wrapped_')._key_of_id)
>>> s = S({'a': 1, 'b': 2}); assert list(s) == ['wrapped_a', 'wrapped_b']
>>>
>>> S = store_wrap(dict)
>>> _wrap_outcoming(S, '_obj_of_data', lambda x: x * 7)
>>> s = S({'a': 1, 'b': 2})
>>> list(s.values())
[7, 14]
"""
if trans_func is not None:
wrapped_func = getattr(store_cls, wrapped_method)
if not _has_unbound_self(trans_func):
# print(f"00000: {store_cls}: {wrapped_method}, {trans_func}, {wrapped_func}, {wrap_arg_idx}")
@wraps(wrapped_func)
def new_method(self, x):
# # Long form (for explanation)
# super_method = getattr(super(store_cls, self), wrapped_method)
# output_of_super_method = super_method(x)
# transformed_output_of_super_method = trans_func(output_of_super_method)
# return transformed_output_of_super_method
return trans_func(
getattr(super(store_cls, self), wrapped_method)(x)
)
else:
# print(f"11111: {store_cls}: {wrapped_method}, {trans_func}, {wrapped_func}, {wrap_arg_idx}")
@wraps(wrapped_func)
def new_method(self, x):
# # Long form (for explanation)
# super_method = getattr(super(store_cls, self), wrapped_method)
# output_of_super_method = super_method(x)
# transformed_output_of_super_method = trans_func(self, output_of_super_method)
# return transformed_output_of_super_method
return trans_func(
self, getattr(super(store_cls, self), wrapped_method)(x)
)
setattr(store_cls, wrapped_method, new_method) | 5ea4782f528c7822d8906cde415cc318353b54ba | 17,712 |
import math
def quantize(x):
"""convert a float in [0,1] to an int in [0,255]"""
y = math.floor(x*255)
return y if y<256 else 255 | b941a11d0d6af3162c964568e2d97c8d81cd1442 | 17,713 |
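A couple of spot checks for the helper above.
assert quantize(0.0) == 0
assert quantize(0.5) == 127
assert quantize(1.0) == 255  # the explicit 255 cap keeps the result in range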
import logging
def initialize_logger(prefix):
"""
Initialization of logging subsystem. Two logging handlers are brought up:
'fh' which logs to a log file and 'ch' which logs to standard output.
:param prefix: prefix that is added to the filename
:return logger: return a logger instance
"""
logger = logging.getLogger('charm-cli')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
try:
if prefix:
log_filename = '{}_charm-cli.log'.format(prefix)
else:
log_filename = 'charm-cli.log'
fh = logging.FileHandler(log_filename, 'w')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
except IOError as error:
logger.warning('WARNING: Cannot create log file! Run charm-cli from a directory to '
'which you have write access.')
logger.warning(str(error))
return logger | a6883736b17b9dc213bf4d26fd153fc8d0e11025 | 17,714 |
import tensorflow as tf  # TensorFlow 1.x API is assumed (TF1-style tf.name_scope signature)
def interpolate(x,
x_data,
y_data,
left_slope=0.0,
right_slope=0.0,
dtype=None,
name=None):
"""Performs linear interpolation for supplied points.
Given a set of knots whose x- and y- coordinates are in `x_data` and `y_data`,
this function returns y-values for x-coordinates in `x` via piecewise
linear interpolation.
`x_data` must be strictly increasing but `y_data` don't need to be because we
don't require the function approximated by these knots to be monotonic.
#### Examples
```python
x = [-10, -1, 1, 3, 6, 7, 8, 15, 18, 25, 30, 35]
# `x_data` must be increasing, but `y_data` don't need to be.
x_data = [-1, 2, 6, 8, 18, 30.0]
y_data = [10, -1, -5, 7, 9, 20]
result = interpolate(x, x_data, y_data)
with tf.Session() as sess:
print(sess.run(result))
# [ 10, 10, 2.66666667, -2, -5, 1, 7, 8.4, 9, 15.41666667, 20, 20]
```
Args:
x: x-coordinates for which we need to get interpolation. A 1-D `Tensor` of
real dtype.
x_data: x coordinates. A 1-D `Tensor` of real dtype. Should be sorted in
increasing order.
y_data: y coordinates. A 1-D `Tensor` of real dtype. Should have the
compatible shape as `x_data`.
left_slope: The slope to use for extrapolation with x-coordinate smaller
than the min `x_data`. It's a 0-D `Tensor`. If not supplied, the default
will be 0, meaning constant extrapolation, i.e. extrapolated value will be
the leftmost `y_data`.
right_slope: The slope to use for extrapolation with x-coordinate greater
than the max `x_data`. It's a 0-D `Tensor`. If not supplied, the default
will be 0, meaning constant extrapolation, i.e. extrapolated value will be
the rightmost `y_data`.
dtype: Optional tf.dtype for `x`, x_data`, `y_data`, `left_slope` and
`right_slope`. If not specified, the dtype of the inputs will be used.
name: Python str. The name prefixed to the ops created by this function. If
not supplied, the default name 'linear_interpolation' is used.
Returns:
A 1-D `Tensor` of real dtype corresponding to the x-values in `x`.
"""
with tf.name_scope(
name,
default_name='linear_interpolation',
values=[x, x_data, y_data, left_slope, right_slope]):
x = tf.convert_to_tensor(x, dtype=dtype)
x_data = tf.convert_to_tensor(x_data, dtype=dtype)
y_data = tf.broadcast_to(
tf.convert_to_tensor(y_data, dtype=dtype), shape=tf.shape(x_data))
left_slope = tf.convert_to_tensor(left_slope, dtype=dtype)
right_slope = tf.convert_to_tensor(right_slope, dtype=dtype)
# TODO(b/130141692): add batching support.
x_data_is_rank_1 = tf.assert_rank(x_data, 1)
with tf.compat.v1.control_dependencies([x_data_is_rank_1]):
# Get upper bound indices for `x`.
upper_indices = tf.searchsorted(x_data, x, side='left', out_type=tf.int32)
x_data_size = tf.shape(x_data)[-1]
at_min = tf.equal(upper_indices, 0)
at_max = tf.equal(upper_indices, x_data_size)
# Create tensors in order to be used by `tf.where`.
# `values_min` are extrapolated values for x-coordinates less than or
# equal to `x_data[0]`.
# `values_max` are extrapolated values for x-coordinates greater than
# `x_data[-1]`.
values_min = y_data[0] + left_slope * (
x - tf.broadcast_to(x_data[0], shape=tf.shape(x)))
values_max = y_data[-1] + right_slope * (
x - tf.broadcast_to(x_data[-1], shape=tf.shape(x)))
# `tf.where` evaluates all branches, need to cap indices to ensure it
# won't go out of bounds.
capped_lower_indices = tf.math.maximum(upper_indices - 1, 0)
capped_upper_indices = tf.math.minimum(upper_indices, x_data_size - 1)
x_data_lower = tf.gather(x_data, capped_lower_indices)
x_data_upper = tf.gather(x_data, capped_upper_indices)
y_data_lower = tf.gather(y_data, capped_lower_indices)
y_data_upper = tf.gather(y_data, capped_upper_indices)
# Nan in unselected branches could propagate through gradient calculation,
# hence we need to clip the values to ensure no nan would occur. In this
# case we need to ensure there is no division by zero.
x_data_diff = x_data_upper - x_data_lower
floor_x_diff = tf.where(at_min | at_max, x_data_diff + 1, x_data_diff)
interpolated = y_data_lower + (x - x_data_lower) * (
y_data_upper - y_data_lower) / floor_x_diff
interpolated = tf.where(at_min, values_min, interpolated)
interpolated = tf.where(at_max, values_max, interpolated)
return interpolated | 38c1f3eff92a203894b45add718c354e431c9b03 | 17,715 |
from typing import Optional
import pickle
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
# SCOPES is assumed to be defined at module level alongside this function
def login(token_path: str) -> Optional[Credentials]:
"""
Trigger the authentication so that we can store a new token.pickle.
"""
flow = InstalledAppFlow.from_client_secrets_file(
'gcal/credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token_path, 'wb') as token:
pickle.dump(creds, token)
return creds | 72c7164297cfc17c661253f9496f8323cc3f217c | 17,716 |
import requests
def get_auth_token(context, scope):
"""
Get a token from the auth service to allow access to a service
:param context: context of the test
:return: the token
"""
secret = get_client_secret(context)
data = {
'grant_type': 'client_credentials',
'scope': scope
}
response = requests.post(
'{}/token'.format(context.services['auth']),
data=data,
headers={'Content-Type': 'application/x-www-form-urlencoded'},
timeout=REQUEST_TIMEOUT,
verify=context.keychain['CA_CRT'],
auth=(context.client_id, secret)
)
return response.json()['access_token'] | d36e66ed08f637f93b2f9226e7ccaeb9cbe07a2c | 17,717 |
def getDefaultFontFamily():
"""Returns the default font family of the application"""
return qt.QApplication.instance().font().family() | 408aa406d09dcc788bff46c3346307713f5b0fdf | 17,718 |
def result(a, b, operator):
"""This function return result"""
lambda_ops = {
"+": (lambda x,y: x+y),
"-": (lambda x,y: x-y),
"*": (lambda x,y: x*y),
"/": (lambda x,y: x/y),
"//": (lambda x,y: x//y),
"%": (lambda x,y: x%y),
}
r = False
error = ''
if operator in lambda_ops:
if (operator == "/" or operator == "//" or operator == "%" ) and b==0:
error = "Oops, division or modulo by zero"
else:
r = lambda_ops[operator](a, b)
else:
error = "Use either + - * / or % next time"
return r, error | febfacf3aa94bc15931cf79979329b3b1a5c7bc5 | 17,719 |
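A short usage sketch of the calculator helper above.
value, error = result(10, 3, "%")
print(value, error)   # 1
value, error = result(10, 0, "/")
print(value, error)   # False Oops, division or modulo by zero
value, error = result(2, 3, "**")
print(value, error)   # False Use either + - * / or % next time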
import torch
def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
"""Performs non-maximum suppression in a batched fashion.
Modified from https://github.com/pytorch/vision/blob
/505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
In order to perform NMS independently per class, we add an offset to all
the boxes. The offset is dependent only on the class idx, and is large
enough so that boxes from different classes do not overlap.
Arguments:
boxes (torch.Tensor): boxes in shape (N, 4).
scores (torch.Tensor): scores in shape (N, ).
idxs (torch.Tensor): each index value correspond to a bbox cluster,
and NMS will not be applied between elements of different idxs,
shape (N, ).
nms_cfg (dict): specify nms type and other parameters like iou_thr.
Possible keys includes the following.
- iou_thr (float): IoU threshold used for NMS.
- split_thr (float): threshold number of boxes. In some cases the
number of boxes is large (e.g., 200k). To avoid OOM during
training, the users could set `split_thr` to a small value.
If the number of boxes is greater than the threshold, it will
perform NMS on each group of boxes separately and sequentially.
Defaults to 10000.
class_agnostic (bool): if true, nms is class agnostic,
i.e. IoU thresholding happens over all boxes,
regardless of the predicted class.
Returns:
tuple: kept dets and indice.
"""
nms_cfg_ = nms_cfg.copy()
class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
if class_agnostic:
boxes_for_nms = boxes
else:
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
with no_nncf_trace():
# NB: this trick is required to make class-separate NMS using ONNX NMS operation;
# the ONNX NMS operation supports another way of class separation (class-separate scores), but this is not used here.
# Note that `not_nncf_trace` is required here, since this trick causes accuracy degradation in case of int8 quantization:
# if the output of the addition below is quantized, the maximal output value is about
# ~ max_value_in_inds * max_coordinate,
# usually this value is big, so after int8-quantization different small bounding
# boxes may be squashed into the same bounding box, this may cause accuracy degradation.
# TODO: check if it is possible in this architecture use class-separate scores that are supported in ONNX NMS.
boxes_for_nms = boxes + offsets[:, None]
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = get_nms_from_type(nms_type)
split_thr = nms_cfg_.pop('split_thr', 10000)
# Won't split to multiple nms nodes when exporting to onnx
if boxes_for_nms.shape[0] < split_thr or (torch.onnx.is_in_onnx_export() or is_in_nncf_tracing):
dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
boxes = boxes[keep]
scores = dets[:, 4]
else:
total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
for id in torch.unique(idxs):
mask = (idxs == id).nonzero(as_tuple=False).view(-1)
dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
total_mask[mask[keep]] = True
keep = total_mask.nonzero(as_tuple=False).view(-1)
keep = keep[scores[keep].argsort(descending=True)]
boxes = boxes[keep]
scores = scores[keep]
return torch.cat([boxes, scores[:, None]], -1), keep | 153e9d8aba307c0a63cc6cc3045bb004f3c8445f | 17,720 |
import numpy as np
from math import ceil
def get_deconv_filter(f_shape):
"""
reference: https://github.com/MarvinTeichmann/tensorflow-fcn
"""
width = f_shape[0]
heigh = f_shape[0]
f = ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(heigh):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape, dtype=np.float32)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
return weights | ec0a5617ad149708d195ab4701860f12ef695de1 | 17,721 |
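A quick shape check for the bilinear upsampling filter above (relies on the numpy/ceil imports added at the top of the snippet).
w = get_deconv_filter([4, 4, 3, 3])  # 4x4 kernel, 3 input and 3 output channels
print(w.shape)                       # (4, 4, 3, 3)
print(w[:, :, 0, 0])                 # the 4x4 bilinear kernel used for channel 0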
def twitter_split_handle_from_txt(tweet):
"""
Looks for RT @twitterhandle: or just @twitterhandle in the beginning of the tweet.
The handle is split off and returned as two separate strings.
:param tweet: (str)
The tweet text to split.
:return: (str, str)
twitter_handle, rest_of_tweet
"""
match = TWEET_TARGET_RE.search(tweet)
if match is not None:
match = match.group()
tweet = tweet.replace(match, '')
return match, tweet | 1f98e5e5f2c1369ca673e6b15114f379786d1e8f | 17,722 |
def points(piece_list):
"""Calculating point differential for the given board state"""
# Args: (1) piece list
# Returns: differential (white points - black points)
# The points are calculated via the standard chess value system:
# Pawn = 1, Knight = 3, Bishop = 3, Rook = 5, Queen = 9
# King = 100 (arbitrarily large)
differential = 0
# For all white pieces...
for i in range(0,16):
# If the piece is active, add its points to the counter
if piece_list[i].is_active:
differential = differential + piece_list[i].value
# For all black pieces...
for i in range(16,32):
# If the piece is active, subtract its points from the counter
if piece_list[i].is_active:
differential = differential - piece_list[i].value
# Return point differential
return differential | d8f36fd887a846a20999a0a99ad672d2902473d4 | 17,723 |
def grid_sampler(x, grid, name=None):
"""
:alias_main: paddle.nn.functional.grid_sampler
:alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler
:old_api: paddle.fluid.layers.grid_sampler
This operation samples input X by using bilinear interpolation based on
flow field grid, which is usually generated by :code:`affine_grid` . The grid of
shape [N, H, W, 2] is the concatenation of (x, y) coordinates
with shape [N, H, W] each, where x is indexing the 4th dimension
(in width dimension) of input data x and y is indexing the 3rd
dimension (in height dimension), finally results is the bilinear
interpolation value of 4 nearest corner points. The output tensor
shape will be [N, C, H, W].
.. code-block:: text
Step 1:
Get (x, y) grid coordinates and scale to [0, H-1/W-1].
.. code-block:: text
grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)
Step 2:
Indices input data X with grid (x, y) in each [H, W] area, and bilinear
interpolate point value by 4 nearest points.
wn ------- y_n ------- en
| | |
| d_n |
| | |
x_w --d_w-- grid--d_e-- x_e
| | |
| d_s |
| | |
ws ------- y_s ------- es
x_w = floor(x) // west side x coord
x_e = x_w + 1 // east side x coord
y_n = floor(y) // north side y coord
y_s = y_n + 1 // south side y coord
d_w = grid_x - x_w // distance to west side
d_e = x_e - grid_x // distance to east side
d_n = grid_y - y_n // distance to north side
d_s = y_s - grid_y // distance to south side
wn = X[:, :, y_n, x_w] // north-west point value
en = X[:, :, y_n, x_e] // north-east point value
ws = X[:, :, y_s, x_w] // south-west point value
es = X[:, :, y_s, x_e] // south-east point value
output = wn * d_e * d_s + en * d_w * d_s
+ ws * d_e * d_n + es * d_w * d_n
Args:
x(Variable): The input tensor, which is a 4-D tensor with shape
[N, C, H, W], N is the batch size, C is the channel
number, H and W is the feature height and width.
The data type is float32 or float64.
grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: Output of shape [N, C, H, W] data samples input X
using bilinear interpolation based on input grid.
The data type is same as input tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# use with affine_grid
x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
out = fluid.layers.grid_sampler(x=x, grid=grid)
"""
helper = LayerHelper("grid_sampler", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
'grid_sampler')
if not isinstance(x, Variable):
raise ValueError("The x should be a Variable")
if not isinstance(grid, Variable):
raise ValueError("The grid should be a Variable")
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x, 'Grid': grid}
helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out})
return out | 06c57d6e6d0a476b42b2472284368352fbd48bc3 | 17,724 |
import os
import subprocess
def fpack (filename):
"""fpack fits images; skip fits tables"""
try:
# fits check if extension is .fits and not an LDAC fits file
if filename.split('.')[-1] == 'fits' and '_ldac.fits' not in filename:
header = read_hdulist(filename, get_data=False, get_header=True,
ext_name_indices=0)
# check if it is an image
if int(header['NAXIS'])==2:
# determine if integer or float image
if int(header['BITPIX']) > 0:
cmd = ['fpack', '-D', '-Y', '-v', filename]
else:
if 'Scorr' in filename or 'limmag' in filename:
quant = 1
else:
quant = 16
cmd = ['fpack', '-q', str(quant), '-D', '-Y', '-v', filename]
# if output fpacked file already exists, delete it
filename_packed = '{}.fz'.format(filename)
if os.path.exists(filename_packed):
os.remove(filename_packed)
log.warning ('fpacking over already existing file {}'
.format(filename_packed))
subprocess.run(cmd)
filename = filename_packed
except Exception as e:
#log.exception (traceback.format_exc())
log.exception ('exception was raised in fpacking of image {}: {}'
.format(filename,e))
return filename | 6b02bdf88130a00bffc216c68f714a1719b306ca | 17,725 |
from functools import partial
def SpecSwitch(spec_id):
"""
Create hotkey function that switches hotkey spec.
:param spec_id: Hotkey spec ID or index.
:return: Hotkey function.
"""
# Create hotkey function that switches hotkey spec
func = partial(spec_switch, spec_id)
# Add `call in main thread` tag
func = tag_call_in_main_thread(func)
# Return the hotkey function
return func | 20ba2ae59d717866474c236d0d97273755a035c8 | 17,726 |
def get_package_version() -> str:
"""Returns the package version."""
metadata = importlib_metadata.metadata(PACKAGE_NAME) # type: ignore
version = metadata["Version"]
return version | a24286ef2a69f60871b41eda8e5ab39ba7f756c0 | 17,727 |
def safe_name(dbname):
"""Returns a database name with non letter, digit, _ characters removed."""
char_list = [c for c in dbname if c.isalnum() or c == '_']
return "".join(char_list) | 2ce4978c3467abaddf48c1d1ab56ed773b335652 | 17,728 |
import pyparsing as pp
def _parse_mro(mro_file_name):
"""Parse an MRO file into python objects."""
# A few helpful pyparsing constants
EQUALS, SEMI, LBRACE, RBRACE, LPAREN, RPAREN = map(pp.Suppress, '=;{}()')
mro_label = pp.Word(pp.alphanums + '_')
mro_modifier = pp.oneOf(["in", "out", "src"])
mro_type = pp.oneOf([
"bool", "bool[]",
"int", "int[]",
"float", "float[]",
"map", "map[]",
"string", "string[]", "string[][]",
"path", "path[]",
"py"] + \
utils.MARTIAN_FILETYPES + [x +'[]' for x in utils.MARTIAN_FILETYPES])
# First parse includes
include = pp.Literal("@include").suppress() + pp.quotedString
includes = pp.ZeroOrMore(include).setResultsName("includes")
includes.addParseAction(pp.removeQuotes)
# Then parse filetypes
filetype = pp.Literal("filetype").suppress() + pp.oneOf(utils.MARTIAN_FILETYPES) + SEMI
filetypes = pp.ZeroOrMore(filetype).setResultsName("filetypes")
#####################################################
# Stage
#####################################################
# Now define the parts of a stage
# First we have a "stage entry", which is a line in the stage body, it looks like "in int lane"
stage_entry = pp.Group(mro_modifier + mro_type + pp.Optional(pp.Word(pp.printables, excludeChars=',')) + pp.Optional(pp.QuotedString('"')))
# Note that stage entries a comma-delimited, but there's a trailing comma so we need the
# pp.Empty option for matching
stage_entries = pp.delimitedList(pp.Or([stage_entry, pp.Empty()]))
# Each stage can have two parts, the main part and a "split using" part
split = (pp.Literal("split using").suppress() + LPAREN +
pp.Optional(pp.Group(stage_entries).setResultsName("split")) + RPAREN)
stage = pp.Group(pp.Literal("stage").suppress() + mro_label + LPAREN +
pp.Group(stage_entries).setResultsName("stage_entries") +
RPAREN +
pp.Optional(split))
# Now create a dict of the stages, with the MRO labels for keys
stages = pp.Dict(pp.ZeroOrMore(stage)).setResultsName("stages")
#####################################################
# Pipeline
#####################################################
## Calls
call_entry = pp.Group(pp.Word(pp.printables, excludeChars="=") + EQUALS +
pp.Word(pp.printables, excludeChars=','))
call_entries = pp.delimitedList(pp.Or([call_entry, pp.Empty()]))
call_modifier = pp.oneOf(["local", "preflight"])
call = pp.Group(pp.Literal("call").suppress() + pp.ZeroOrMore(call_modifier).suppress() +
mro_label + LPAREN + pp.Group(call_entries).setResultsName("call_entries") +
RPAREN)
calls = pp.Dict(pp.ZeroOrMore(call)).setResultsName("pipeline_calls")
## Return
return_entry = call_entry
return_entries = pp.delimitedList(pp.Or([return_entry, pp.Empty()]))
return_ = (pp.Literal("return").suppress() + LPAREN +
pp.Group(return_entries).setResultsName("pipeline_return") + RPAREN)
## Pipeline header
pipeline_header_entry = pp.Group(mro_modifier + mro_type +
pp.Word(pp.printables, excludeChars=",") +
pp.Optional(pp.quotedString))
pipeline_header_entries = pp.delimitedList(pp.Or([pipeline_header_entry, pp.Empty()]))
pipeline = (pp.Literal("pipeline").suppress() + mro_label.setResultsName("pipeline_name") +
LPAREN + pp.Group(pipeline_header_entries).setResultsName("pipeline_header") +
RPAREN + LBRACE + calls + return_ + RBRACE)
mro_file = pp.Each([pp.Optional(includes), filetypes, stages, pp.Optional(pipeline)])
mro_file.ignore(pp.pythonStyleComment)
result = mro_file.parseFile(mro_file_name)
return result | cf2561d1b72c2899fa495c2e83b683b7980b47ab | 17,729 |
import math
import tabulate
def tabulate_stats(stats: rl_common.Stats) -> str:
"""Pretty-prints the statistics in `stats` in a table."""
res = []
for (env_name, (reward_type, reward_path)), vs in stats.items():
for seed, (x, _log_dir) in enumerate(vs):
row = {
"env_name": env_name,
"reward_type": reward_type,
"reward_path": reward_path,
"seed": seed,
}
row.update(x)
filtered_row = {}
for k, v in row.items():
if k.endswith("_std"):
k = k[:-4] + "_se"
v = v / math.sqrt(row["n_traj"])
new_k = _filter_key(k)
if new_k is not None:
filtered_row[new_k] = v
res.append(filtered_row)
return tabulate.tabulate(res, headers="keys") | e853de6ac15e639d7348ee5a423afbdbbf296e7f | 17,730 |
import numpy as np
def LLR_binom(k, n, p0, EPS=1E-15):
""" Log likelihood ratio test statistic for the single binomial pdf.
Args:
k : number of counts (numpy array)
n : number of trials
p0 : null hypothesis parameter value
Returns:
individual log-likelihood ratio values
"""
phat = k/n # maximum likelihood estimate
phat[phat < EPS] = 2*EPS
# Log-likelihood (density) ratios
LLR = 2*( (k*np.log(phat)+(n-k)*np.log(1-phat)) - (k*np.log(p0)+(n-k)*np.log(1-p0)))
return LLR | a423b81a374398b88881ee45a665e7f9a648c4c1 | 17,731 |
def concatenate_shifts(shifts):
""" Take the shifts, which are relative to the previous shift,
and sum them up so that all of them are relative to the first."""
# the first shift is 0,0,0
for i in range(2, len(shifts)): # we start at the third
s0 = shifts[i-1]
s1 = shifts[i]
s1.x += s0.x
s1.y += s0.y
s1.z += s0.z
return shifts | f4b0a41db1db78e3b5f25ca198fdb6cebd6476ca | 17,732 |
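A small sketch of the accumulation above using throwaway shift objects; SimpleNamespace is only a stand-in for whatever shift class the surrounding code actually uses.
from types import SimpleNamespace
shifts = [SimpleNamespace(x=0, y=0, z=0),
          SimpleNamespace(x=1, y=2, z=0),
          SimpleNamespace(x=-1, y=1, z=3)]
concatenate_shifts(shifts)
print(shifts[2].x, shifts[2].y, shifts[2].z)  # 0 3 3 (now relative to the first shift)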
import json
import hashlib
from math import ceil
from flask import request, jsonify, make_response
def users(user_id=None, serialize=True):
"""
The method returns users in a json responses. The json is hashed to increase security.
:param serialize: Serialize helps indicate the format of the response
:param user_id: user id intended to be searched
:return: Json format or plain text depending in the serialize parameter
"""
users = DATA_CONTROLLER.get_user_by_id(user_id=user_id, serialize=True)
page = request.args.get("limit")
number_of_pages = None
pages = []
if page:
number_of_pages = int(ceil(float(len(users)) / PAGE_SIZE))
converted_page = int(page)
if converted_page > number_of_pages or converted_page < 0:
return make_response("", 404)
from_index = (converted_page - 1) * PAGE_SIZE
to_index = from_index + PAGE_SIZE
users = users[from_index:to_index]
if number_of_pages:
pages = range(1, number_of_pages + 1)
if serialize:
data = {
"users": users,
"total": len(users),
"pages": pages
}
json_data = json.dumps(data)
response = make_response(jsonify(data), 200)
# Caching
response.headers["ETag"] = str(hashlib.sha256(json_data).hexdigest())
# Entity tag uniquely identifies request
response.headers["Cache-Control"] = "private, max-age=300"
return response | b939860a7e8794f8e53f63594a3000a0425cb319 | 17,733 |
def course_units_my(user):
"""
Get all course units assign to a teacher available persisted in DB
:return: tuple with
- Course units data
- list of success messages
- list of error messages
"""
success = []
data = set([attendance.course_unit for attendance in
Attendance.objects.filter(creator=user).order_by('course_unit__name')])
success.append("Cadeiras do docente obtidas com sucesso")
general_log.debug(f"{__name__}->{course_units_my.__name__} {success[-1]}")
return data, success, [] | c2541ab4cdfe91ffba809ac6c295cdddd66ffa54 | 17,734 |
def get_world_size():
"""TODO Add missing docstring."""
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size() | f277e157466ea9ebd8d06b552ce1bc544cf78e60 | 17,735 |
def get_namenode_setting(namenode):
"""Function for getting the namenode in input as parameter setting from the configuration file.
Parameters
----------
namenode --> str, the namenode for which you want to get the setting info
Returns
-------
conf['namenodes_setting'][namenode] --> dict, the namenode setting info
"""
return conf['namenodes_setting'][namenode] | e389d7a20a7b0ef7f32b51c7bb31068cb152fc2b | 17,736 |
from typing import Tuple
def cds(identity: str, sequence: str, **kwargs) -> Tuple[sbol3.Component, sbol3.Sequence]:
"""Creates a Coding Sequence (CDS) Component and its Sequence.
:param identity: The identity of the Component. The identity of Sequence is also identity with the suffix '_seq'.
:param sequence: The DNA sequence of the Component encoded in IUPAC.
:param kwargs: Keyword arguments of any other Component attribute.
:return: A tuple of Component and Sequence.
"""
cds_component, cds_seq = dna_component_with_sequence(identity, sequence, **kwargs)
cds_component.roles.append(sbol3.SO_CDS)
return cds_component, cds_seq | 15d99917b840cf2881e1a90c1835c356622511a7 | 17,737 |
import os
import json
def all_scenes():
"""List all scenes
Returns:
list of Scene objects
"""
global _scene_json
if _scene_json is None:
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "scenes.json")) as fh:
_scene_json = json.loads(fh.read())
ret = []
for s in _scene_json:
ret.append(Scene(s["name"],
Room(s["room"],
Building(s["building"]),
RoomType(s["room_type"]))))
return ret | 815216bab4ed9cfdf6b88b4438a275d36beceaca | 17,738 |
import math
import numpy as np
def calc_innovation(xEst, PEst, y, LMid):
"""
Compute innovation and Kalman gain elements
"""
# Compute predicted observation from state
lm = get_landmark_position_from_state(xEst, LMid)
delta = lm - xEst[0:2]
q = (delta.T @ delta)[0, 0]
y_angle = math.atan2(delta[1, 0], delta[0, 0]) - xEst[2, 0]
yp = np.array([[math.sqrt(q), pi_2_pi(y_angle)]])
# compute innovation, i.e. diff with real observation
innov = (y - yp).T # Yt-Yt*
innov[1] = pi_2_pi(innov[1])
# compute matrixes for Kalman Gain
H = jacob_h(q, delta, xEst, LMid)
S = H @ PEst @ H.T + Py
return innov, S, H | 146695c107c46d2736d0e4ecc191d9a399ca8159 | 17,739 |
from typing import Any
def next_key(basekey: str, keys: dict[str, Any]) -> str:
"""Returns the next unused key for basekey in the supplied dictionary.
The first try is `basekey`, followed by `basekey-2`, `basekey-3`, etc
until a free one is found.
"""
if basekey not in keys:
return basekey
i = 2
while f"{basekey}-{i}" in keys:
i = i + 1
return f"{basekey}-{i}" | e1da51c79fd465088294e053fdc970934268211b | 17,740 |
import yaml
def load_mmio_overhead_elimination_map(yaml_path):
"""
Load a previously dumped mmio overhead elimination map
"""
with open(yaml_path, "r") as yaml_file:
res = yaml.safe_load(yaml_file.read())
res_map = {
'overall': res[0]['overall'],
'per_model': res[1]['per_model'],
}
if len(res) > 2:
res_map['per_access_context'] = res[2]['per_access_context']
return res_map | ae0ead1aa8c9f26acad9a23a35791592efbfe47e | 17,741 |
from MoinMoin.util import diff_html
from MoinMoin.util import diff_text
def execute(pagename, request):
""" Handle "action=diff"
checking for either a "rev=formerrevision" parameter
or rev1 and rev2 parameters
"""
if not request.user.may.read(pagename):
Page(request, pagename).send_page()
return
try:
date = request.values['date']
try:
date = long(date) # must be long for py 2.2.x
except StandardError:
date = 0
except KeyError:
date = 0
try:
rev1 = int(request.values.get('rev1', -1))
except StandardError:
rev1 = 0
try:
rev2 = int(request.values.get('rev2', 0))
except StandardError:
rev2 = 0
if rev1 == -1 and rev2 == 0:
rev1 = request.rev
if rev1 is None:
rev1 = -1
# spacing flag?
ignorews = int(request.values.get('ignorews', 0))
_ = request.getText
# get a list of old revisions, and back out if none are available
currentpage = Page(request, pagename)
currentrev = currentpage.current_rev()
if currentrev < 2:
request.theme.add_msg(_("No older revisions available!"), "error")
currentpage.send_page()
return
if date: # this is how we get called from RecentChanges
rev1 = 0
log = editlog.EditLog(request, rootpagename=pagename)
for line in log.reverse():
if date >= line.ed_time_usecs and int(line.rev) != 99999999:
rev1 = int(line.rev)
break
else:
rev1 = 1
rev2 = 0
if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
rev1, rev2 = rev2, rev1
if rev1 == -1:
oldrev = currentrev - 1
oldpage = Page(request, pagename, rev=oldrev)
elif rev1 == 0:
oldrev = currentrev
oldpage = currentpage
else:
oldrev = rev1
oldpage = Page(request, pagename, rev=oldrev)
if rev2 == 0:
newrev = currentrev
newpage = currentpage
else:
newrev = rev2
newpage = Page(request, pagename, rev=newrev)
oldlog = oldpage.editlog_entry()
newlog = newpage.editlog_entry()
if not oldlog or not newlog:
# We use "No log entries found." msg because we already have i18n
# for that. Better would "At least one log entry was not found.".
request.theme.add_msg(_("No log entries found."), "error")
currentpage.send_page()
return
edit_count = abs(newrev - oldrev)
# Start output
# This action generates content in the user language
request.setContentLanguage(request.lang)
request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)
f = request.formatter
request.write(f.div(1, id="content"))
oldrev = oldpage.get_real_rev()
newrev = newpage.get_real_rev()
title = _('Differences between revisions %d and %d') % (oldrev, newrev)
if edit_count > 1:
title += ' ' + _('(spanning %d versions)') % (edit_count, )
title = f.text(title)
page_url = wikiutil.escape(currentpage.url(request), True)
def enabled(val):
return not val and u' disabled="disabled"' or u''
revert_html = ""
if request.user.may.revert(pagename):
revert_html = """
<form action="%s" method="get">
<div style="text-align:center">
<input name="action" value="revert" type="hidden">
<input name="rev" value="%d" type="hidden">
<input value="%s" type="submit"%s>
</div>
</form>
""" % (page_url, rev2, _("Revert to this revision"), enabled(newrev < currentrev))
other_diff_button_html = """
<td style="border:0;">
<form action="%s" method="get">
<div style="text-align:%s">
<input name="action" value="diff" type="hidden">
<input name="rev1" value="%d" type="hidden">
<input name="rev2" value="%d" type="hidden">
<input value="%s" type="submit"%s>
</div>
</form>
</td>
"""
navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
%(button)s
<td style="border:0">
%%s
</td>
%(button)s
</tr>
</table>
""" % {'button': other_diff_button_html}
prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev
prev_newrev = (newrev > 1) and (newrev - 1) or 1
next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev
navigation_html = navigation_html % (title,
page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
revert_html,
page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), )
request.write(f.rawHTML(navigation_html))
def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes, enabled_title, disabled_title):
if enabled:
return currentpage.link_to(request, on=1, querystr={
'action': 'diff',
'rev1': old_rev,
'rev2': new_rev,
}, css_class="diff-nav-link %s" % css_classes, title=enabled_title) + request.formatter.text(caption) + currentpage.link_to(request, on=0)
else:
return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
'css_classes': css_classes,
'disabled_title': disabled_title,
'caption': caption,
}
rev_info_html = """
<div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
<div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
<div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
<div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
'rev_header': _('Revision %(rev)d as of %(date)s'),
'rev_size_caption': _('Size'),
'rev_author_caption': _('Editor'),
'rev_ts_caption': _('Date'),
'rev_comment_caption': _('Comment'),
}
rev_info_old_html = rev_info_html % {
'rev_first_link': rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4', 'diff-first-link diff-old-rev', _('Diff with oldest revision in left pane'), _("No older revision available for diff")),
'rev_prev_link': rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190', 'diff-prev-link diff-old-rev', _('Diff with older revision in left pane'), _("No older revision available for diff")),
'rev_next_link': rev_nav_link((oldrev < currentrev) and (next_oldrev < newrev), next_oldrev, newrev, u'\u2192', 'diff-next-link diff-old-rev', _('Diff with newer revision in left pane'), _("Can't change to revision newer than in right pane")),
'rev_last_link': '',
'rev': oldrev,
'rev_size': oldpage.size(),
'rev_author': oldlog.getEditor(request) or _('N/A'),
'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
'rev_comment': wikiutil.escape(oldlog.comment) or '',
}
rev_info_new_html = rev_info_html % {
'rev_first_link': '',
'rev_prev_link': rev_nav_link((newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev, u'\u2190', 'diff-prev-link diff-new-rev', _('Diff with older revision in right pane'), _("Can't change to revision older than revision in left pane")),
'rev_next_link': rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192', 'diff-next-link diff-new-rev', _('Diff with newer revision in right pane'), _("No newer revision available for diff")),
'rev_last_link': rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5', 'diff-last-link diff-old-rev', _('Diff with newest revision in right pane'), _("No newer revision available for diff")),
'rev': newrev,
'rev_size': newpage.size(),
'rev_author': newlog.getEditor(request) or _('N/A'),
'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
'rev_comment': wikiutil.escape(newlog.comment) or '',
}
if request.user.show_fancy_diff:
request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body(), old_top=rev_info_old_html, new_top=rev_info_new_html, old_top_class="diff-info", new_top_class="diff-info")))
newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
else:
request.write(f.rawHTML('<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>' % (rev_info_old_html, rev_info_new_html)))
lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
if not lines:
msg = f.text(" - " + _("No differences found!"))
if edit_count > 1:
msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
'count': edit_count}) + f.paragraph(0)
request.write(msg)
else:
if ignorews:
request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
else:
qstr = {'action': 'diff', 'ignorews': '1', }
if rev1:
qstr['rev1'] = str(rev1)
if rev2:
qstr['rev2'] = str(rev2)
request.write(f.paragraph(1), Page(request, pagename).link_to(request,
text=_('Ignore changes in the amount of whitespace'),
querystr=qstr, rel='nofollow'), f.paragraph(0))
request.write(f.preformatted(1))
for line in lines:
if line[0] == "@":
request.write(f.rule(1))
request.write(f.text(line + '\n'))
request.write(f.preformatted(0))
request.write(f.div(0)) # end content div
request.theme.send_footer(pagename)
request.theme.send_closing_html() | 7305385a84c561fc6c5a8b0e8c52635cf19c77d6 | 17,742 |
def SetBmaskName(enum_id, bmask, name):
"""
Set bitmask name (only for bitfields)
@param enum_id: id of enum
@param bmask: bitmask of the constant
@param name: name of bitmask
@return: 1-ok, 0-failed
"""
return idaapi.set_bmask_name(enum_id, bmask, name) | 2a134b496214d7f8e8887dc5a3ca93264da08f5b | 17,743 |
import math
def angle_to(x: int, y: int) -> float:
"""Return the angle in radians for the given vector pointing from the origin (0,0), adjusted so that north = 0"""
#xt,yt = y,x
#rad = math.atan2(yt,xt)
rad = math.atan2(x,y)
if rad < 0.0:
rad = math.pi + (math.pi + rad)
return rad | 545cfa3769da10eea2138132295e3387c4556b39 | 17,744 |
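A few spot checks for the bearing helper above (x is treated as east, y as north).
import math
assert math.isclose(angle_to(1, 0), math.pi / 2)       # due east
assert math.isclose(angle_to(0, -1), math.pi)          # due south
assert math.isclose(angle_to(-1, 0), 3 * math.pi / 2)  # due west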
import sys
import os
import subprocess as sp
def sp_args():
"""Apply quirks for `subprocess.Popen` to have standard behavior in PyInstaller-frozen windows binary.
Returns
-------
dict[str, str or bool or None]
The additional arguments for `subprocess` calls.
"""
if sys.platform.startswith('win32'):
# Prevent Windows from popping up a command window on subprocess calls
startup_info = sp.STARTUPINFO()
startup_info.dwFlags |= sp.STARTF_USESHOWWINDOW
# Make Windows search the ``PATH``
environment = os.environ
else:
startup_info = None
environment = None
# Avoid ``OSError`` exception by redirecting all standard handles
return {'stdout': sp.PIPE, 'stdin': sp.PIPE, 'stderr': sp.PIPE, 'startupinfo': startup_info, 'env': environment,
'close_fds': True} | 76d50f2f6e745e486187900efab0e6d58e0fe292 | 17,745 |
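A usage sketch assuming the snippet above (with its subprocess import); on Linux/macOS the startupinfo/env entries are simply None and harmless.
import sys
import subprocess as sp
proc = sp.Popen([sys.executable, "--version"], **sp_args())
out, err = proc.communicate()
print(out.decode().strip() or err.decode().strip())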
import torch
def adj(triples, num_nodes, num_rels, cuda=False, vertical=True):
"""
Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all
relations are stacked vertically).
:param edges: List representing the triples
:param i2r: list of relations
:param i2n: list of nodes
:return: sparse tensor
"""
r, n = num_rels, num_nodes
size = (r * n, n) if vertical else (n, r * n)
from_indices = []
upto_indices = []
for fr, rel, to in triples:
offset = rel.item() * n
if vertical:
fr = offset + fr.item()
else:
to = offset + to.item()
from_indices.append(fr)
upto_indices.append(to)
indices = torch.tensor([from_indices, upto_indices], dtype=torch.long, device=d(cuda))
assert indices.size(1) == len(triples)
assert indices[0, :].max() < size[0], f'{indices[0, :].max()}, {size}, {r}'
assert indices[1, :].max() < size[1], f'{indices[1, :].max()}, {size}, {r}'
return indices.t(), size | 8531170c20c39011efcc7a2223c4da49c41ffabb | 17,746 |
def join_b2_path(b2_dir, b2_name):
"""
Like os.path.join, but for B2 file names where the root directory is called ''.
:param b2_dir: a directory path
:type b2_dir: str
:param b2_name: a file name
:type b2_name: str
"""
if b2_dir == '':
return b2_name
else:
return b2_dir + '/' + b2_name | 20f4e6e54f7f3b4a1583b503d4aa2d8995318978 | 17,747 |
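Quick checks for the helper above.
assert join_b2_path("", "file.txt") == "file.txt"
assert join_b2_path("photos/2021", "img.jpg") == "photos/2021/img.jpg"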
def actual_line_flux(wavelength,flux, center=None,pass_it=True):
"""Measure actual line flux:
parameters
----------
wavelength: float array
flux: float array
center: float
wavelength to center plot on
output parameters
-----------------
flux in line integrated over region
flux in background over same region
Notes
-----
In novae the line profile is composed of a superposition of emission from
different regions, sometimes optically thick, sometimes thin, but not gaussian in
shape.
Here we plot the profile
provide endpoints (w1,f1), (w2,f2) at the flux level of the background
"""
import numpy as np
from pylab import plot,xlim, ylim, title, xlabel, ylabel, ginput, figure, subplot
from scipy.interpolate import interp1d
# find plot center and range
if type(center) == type(None): center=wavelength.mean()
x1 = center - 7./300*center
x2 = center + 7./300*center
q = (wavelength > x1) & (wavelength < x2)
w = wavelength[q]
flx = flux[q]
y2 = flx.max()
f = figure()
ax = subplot(111)
getit = True
while getit:
ax.plot(w,flx,ls='steps',color='darkblue')
print ("please click the desired limits of the profile at the background level")
print ("no timeout")
aa = ginput(n=2,timeout=0)
x1,y1 = aa[0]
x2,y2 = aa[1]
x1 = float(x1)
x2 = float(x2)
y1 = float(y1)
y2 = float(y2)
q = (w >= x1) & (w <= x2)
bg = interp1d([x1,x2],[y1,y2],)
ax.fill_between(w[q], bg(w[q]), flx[q], color='c')
ans = input("Do you want to continue ?")
if (ans.upper()[0] != 'Y') & (pass_it == False) :
print ("Answer is not yes\n TRY AGAIN ")
ax.cla()
else:
getit = False
# now compute the fluxes
w = w[q]
flx = flx[q]
tot_flx = []
tot_bkg = (y2+y1)*(x2-x1)*0.5
for k in range(1,len(w)):
tot_flx.append(0.5*(flx[k-1]+flx[k])*(w[k]-w[k-1]))  # trapezoidal rule over each wavelength bin
line_flx = np.asarray(tot_flx).sum() - tot_bkg
print (type(line_flx), line_flx)
print (type(tot_bkg), tot_bkg)
print (type( 0.5*(x2+x1) ), 0.5*(x2+x1) )
print ( (" wavelength = %10.2f\n line flux = %10.3e\n"+
" background flux = %10.2e\n FWZI = %10.2f\n")
%( (x2+x1)*0.5, line_flx, tot_bkg, (x2-x1) ) )
return {'wavelength':(x2+x1)*0.5,
'line_flux':line_flx,
'integrated_background_flux':tot_bkg,
"FWZI":(x2-x1)} | 6d4d36e6e632e605158da704cc1e3462cdc173e1 | 17,748 |
def _too_many_contigs(ref_file):
"""Check for more contigs than the maximum samblaster deduplication supports.
"""
max_contigs = 32768
return len(list(ref.file_contigs(ref_file))) >= max_contigs | 03a01719b634d6eea143306f96cae6ea26e3f1f9 | 17,749 |
def survival_regression_metric(metric, outcomes_train, outcomes_test,
predictions, times):
"""Compute metrics to assess survival model performance.
Parameters
-----------
metric: string
Measure used to assess the survival regression model performance.
Options include:
- `brs` : brier score
- `ibs` : integrated brier score
- `auc`: cumulative dynamic area under the curve
- `ctd` : concordance index inverse probability of censoring
weights (ipcw)
predictions: np.array
A numpy array of survival time predictions for the samples.
outcomes_train : pd.DataFrame
A pandas dataframe with rows corresponding to individual samples and
columns 'time' and 'event' for test data.
outcomes_test : pd.DataFrame
A pandas dataframe with rows corresponding to individual samples and
columns 'time' and 'event' for training data.
times: np.array
The time points at which to compute metric value(s).
Returns
-----------
float: The metric value for the specified metric.
"""
survival_train = util.Surv.from_dataframe('event', 'time', outcomes_train)
survival_test = util.Surv.from_dataframe('event', 'time', outcomes_test)
predictions_test = predictions
if metric == 'brs':
return metrics.brier_score(survival_train, survival_test,
predictions_test, times)[-1]
elif metric == 'ibs':
return metrics.integrated_brier_score(survival_train, survival_test,
predictions_test, times)
elif metric == 'auc':
return metrics.cumulative_dynamic_auc(survival_train, survival_test,
1-predictions_test, times)[0]
elif metric == 'ctd':
vals = []
for i in range(len(times)):
vals.append(metrics.concordance_index_ipcw(survival_train, survival_test,
1-predictions_test[:,i],
tau=times[i])[0])
return vals
else:
raise NotImplementedError() | 244767ca0533af2fe792fe226fb2a9eb5e166201 | 17,750 |
import numpy as np
from scipy import signal
def _get_all_valid_corners(img_arr, crop_size, l_thresh, corner_thresh):
"""Get all valid corners for random cropping"""
valid_pix = img_arr >= l_thresh
kernel = np.ones((crop_size, crop_size))
conv = signal.correlate2d(valid_pix, kernel, mode='valid')
return conv > (corner_thresh * crop_size ** 2) | 6e357a6585e8485de74e01ced62eedcbfe33bdec | 17,751 |
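A small sketch of the corner-mask computation, assuming the numpy/scipy imports added above; the image and thresholds are arbitrary.
import numpy as np
rng = np.random.default_rng(0)
img = rng.random((64, 64))
mask = _get_all_valid_corners(img, crop_size=16, l_thresh=0.2, corner_thresh=0.9)
print(mask.shape)  # (49, 49): one entry per possible top-left crop corner
print(mask.dtype)  # bool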
def remove_dup(a):
""" remove duplicates using extra array """
res = []
count = 0
for i in range(0, len(a)-1):
if a[i] != a[i+1]:
res.append(a[i])
count = count + 1
res.append(a[len(a)-1])
print('Total count of unique elements: {}'.format(count + 1))
return res | 8286c07098c078cd61d4890cd120723b9e9f04e7 | 17,752 |
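A usage sketch for the helper above; it assumes duplicates are adjacent (i.e., a sorted input).
print(remove_dup([1, 1, 2, 3, 3, 3, 4]))
# prints "Total count of unique elements: 4" and returns [1, 2, 3, 4]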
def mjd2crnum(mjd):
"""
Converts MJD to Carrington Rotation number
Mathew Owens, 16/10/20
"""
return 1750 + ((mjd-45871.41)/27.2753) | 233f91e6de4c5105732fc2c8f9f33d054491e1d2 | 17,753 |
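A quick sanity check for the conversion above: the reference epoch maps to Carrington Rotation 1750.
print(mjd2crnum(45871.41))            # 1750.0
print(mjd2crnum(45871.41 + 27.2753))  # 1751.0 (one rotation later)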
def poly_print_simple(poly,pretty=False):
"""Show the polynomial in descending form as it would be written"""
# Get the degree of the polynomial in case it is in non-normal form
d = poly.degree()
if d == -1:
return f"0"
out = ""
# Step through the ascending list of coefficients backward
# We do this because polynomials are usually written in descending order
for pwr in range(d,-1,-1):
# Skip the zero coefficients entirely
if poly[pwr] == 0:
continue
coe = poly[pwr]
val = abs(coe)
sgn = "-" if coe//val == -1 else "+"
# When the coefficient is 1 or -1 don't print it unless it is the
# coefficient for x^0
if val == 1 and pwr != 0:
val = ""
# If it is the first term include the sign of the coefficient
if pwr == d:
if sgn == "+":
sgn = ""
# Handle powers of 1 or 0 that appear as the first term
if pwr == 1:
s = f"{sgn}{val}x"
elif pwr == 0:
s = f"{sgn}{val}"
else:
if pretty == False:
s = f"{sgn}{val}x^{pwr}"
else:
s = f"{sgn}{val}x$^{{{pwr}}}$"
# If the power is 1 just show x rather than x^1
elif pwr == 1:
s = f" {sgn} {val}x"
# If the power is 0 only show the sign and value
elif pwr == 0:
s = f" {sgn} {val}"
# Otherwise show everything
else:
if pretty == False:
s = f" {sgn} {val}x^{pwr}"
else:
s = f" {sgn} {val}x$^{{{pwr}}}$"
out += s
return out | 903f9d4a703e3f625da5f13f6fe084e8894d723b | 17,754 |
def parse_file_():
"""
Retrieves the parsed information by specifying the file, the timestamp and latitude + longitude. Don't forget
to encode the plus sign '+' = %2B!
Example: GET /parse/data/ecmwf/an-2017-09-14.grib?timestamp=2017-09-16T15:21:20%2B00:00&lat=48.398400&lon=9.591550
:param fileName: path to a retrieved ecmwf grib file.
:return: OK including json content or empty not found
"""
try:
[point, date] = validate_request_parameters()
except ValueError as e:
return misc.create_response(jsonify(message=e.message), 400)
file_name = misc.build_file_name(date)
path_to_file = file_directory + os.sep + file_name
files = file_status.get_available_files()
if file_name not in files or not os.path.isfile(path_to_file):
        msg = {'message': 'Given filename={} could not be found; the available files are attached: {}.'.format(file_name,
files),
'data': {'files': files}}
return misc.create_response(jsonify(transform_message(msg).data), 404)
result = cache.cache.get(request.url)
# check cache
if not result:
result = parse_action.parse(path_to_file, point, date)
return Response(json.dumps(transform(result), default=CopernicusData.json_serial, indent=2), mimetype="text/json",
status=200) | be54ab0dc3b9ec0a0f5462684f66d209e6e72728 | 17,755 |
def make_model(drc_csv: str, sat_tables: list, sector_info_csv: str,
ia_tables=None, units_csv='', compartments_csv='',
locations_csv='') -> model.Model:
"""
Creates a full EE-IO model with all information required for calculations,
JSON-LD export, validation, etc.
:param drc_csv: CSV file with the direct requirements matrix A
:param sat_tables: a list of CSV files with satellite tables
:param sector_info_csv: CSV file with sector metadata
:param ia_tables: an optional list of CSV files with impact assessment factors.
:param units_csv: optional file with unit metadata
:param compartments_csv: optional file with compartment metadata
:param locations_csv: optional file with location metadata
"""
drc = read_csv_data_frame(drc_csv)
sat_table = make_sat_table(*sat_tables)
sectors = ref.SectorMap.read(sector_info_csv)
ia_table = None
if ia_tables is not None and len(ia_tables) > 0:
ia_table = ia.Table()
for iat in ia_tables:
ia_table.add_file(iat)
def read_map(name, clazz):
if name is None or name == '':
return clazz.create_default()
else:
return clazz.read(name)
units = read_map(units_csv, ref.UnitMap)
compartments = read_map(compartments_csv, ref.CompartmentMap)
locations = read_map(locations_csv, ref.LocationMap)
return model.Model(drc, sat_table, sectors, ia_table, units, compartments,
locations) | f990f1617de29f75e4f86acc7647f7d9a06bfa53 | 17,756 |
def newNXentry(parent, name):
"""Create new NXentry group.
Args:
parent (h5py.File or h5py.Group): hdf5 file handle or group
        name (str): group name without extension
Returns:
        h5py.Group: new NXentry group
"""
grp = parent.create_group(name)
grp.attrs["NX_class"] = "NXentry"
if "NX_class" in parent.attrs:
if parent.attrs["NX_class"] == "NXentry":
grp.attrs["NX_class"] = "NXsubentry"
return grp | 1529fbe80ca8a23f8cd7717c5df5d7d239840149 | 17,757 |
import datetime
def _build_europe_gas_day_tzinfo():
"""
Build the Europe/Gas_Day based on the CET time.
:raises ValueError: When something is wrong with the CET/CEST definition
"""
zone = 'Europe/Gas_Day'
transitions = _get_transitions()
transition_info_cet = _get_transition_info_cet()
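    # The European gas day starts at 06:00 CET, so each CET/CEST UTC offset is shifted back by 6 hours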
difference_sec = 3600 * 6
transition_info_gas_day = []
for dt1, dt2, name in transition_info_cet:
sec1 = dt1.seconds - difference_sec
hours1 = sec1 / (60 * 60)
gas_dt1 = datetime.timedelta(hours=hours1)
sec2 = dt2.seconds - difference_sec
hours2 = sec2 / (60 * 60)
gas_dt2 = datetime.timedelta(hours=hours2)
if name == 'CET':
name = 'CET'
elif name == 'CEST':
name = 'CEST'
else:
raise ValueError("tz name not CET or CEST")
transition_info_gas_day.append((gas_dt1, gas_dt2, name))
gas_day_cls = type('Europe/Gas_Day', (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info_gas_day
))
_tzinfo_cache[zone] = gas_day_cls()
return _tzinfo_cache[zone] | abc83bc3096c0dacfb7d9be88fdf4043ee328cf1 | 17,758 |
from typing import Set
from typing import Optional
from typing import List
from typing import Dict
from collections import defaultdict
def prepare_variants_relations_data(
queryset: "QuerySet",
fields: Set[str],
attribute_ids: Optional[List[int]],
warehouse_ids: Optional[List[int]],
) -> Dict[int, Dict[str, str]]:
"""Prepare data about variants relation fields for given queryset.
It return dict where key is a product pk, value is a dict with relation fields data.
"""
warehouse_fields = ProductExportFields.WAREHOUSE_FIELDS
attribute_fields = ProductExportFields.VARIANT_ATTRIBUTE_FIELDS
result_data: Dict[int, dict] = defaultdict(dict)
fields.add("variants__pk")
if attribute_ids:
fields.update(ProductExportFields.VARIANT_ATTRIBUTE_FIELDS.values())
if warehouse_ids:
fields.update(ProductExportFields.WAREHOUSE_FIELDS.values())
relations_data = queryset.values(*fields)
for data in relations_data.iterator():
pk = data.get("variants__pk")
image = data.pop("variants__images__image", None)
result_data = add_image_uris_to_data(
pk, image, "variants__images__image", result_data
)
# handle attribute and warehouse data
attribute_data: dict = {}
warehouse_data: dict = {}
attribute_pk = str(data.pop(attribute_fields["attribute_pk"], ""))
attribute_data = {
"slug": data.pop(attribute_fields["slug"], None),
"value": data.pop(attribute_fields["value"], None),
}
warehouse_pk = str(data.pop(warehouse_fields["warehouse_pk"], ""))
warehouse_data = {
"slug": data.pop(warehouse_fields["slug"], None),
"qty": data.pop(warehouse_fields["quantity"], None),
}
if attribute_ids and attribute_pk in attribute_ids:
result_data = add_attribute_info_to_data(
pk, attribute_data, "variant attribute", result_data
)
if warehouse_ids and warehouse_pk in warehouse_ids:
result_data = add_warehouse_info_to_data(pk, warehouse_data, result_data)
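    # Join multi-value sets into comma-separated strings for the final export rows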
result: Dict[int, Dict[str, str]] = {
pk: {
header: ", ".join(sorted(values)) if isinstance(values, set) else values
for header, values in data.items()
}
for pk, data in result_data.items()
}
return result | 37c275898bb69fc61cbdddf2f4914ac77f22e7ed | 17,759 |
import torch
def vnorm(velocity, window_size):
"""
Normalize velocity with latest window data.
- Note that std is not divided. Only subtract mean
- data should have dimension 3.
"""
v = velocity
N = v.shape[1]
if v.dim() != 3:
print("velocity's dim must be 3 for batch operation")
exit(-1)
on_gpu = v.is_cuda
if not on_gpu and torch.cuda.is_available():
v = v.cuda()
padding_size = window_size - 1
batch_size = v.shape[0]
pad = v[:, 0, :].reshape(batch_size, 1, 3).expand(batch_size, padding_size, 3)
v_normed = torch.cat([pad, v], dim=1)
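    # Accumulate shifted copies of v so the first N entries hold the trailing-window sum (start padded with the first value)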
for i in range(window_size-1, 0, -1):
v_normed[:, i-1:i-1+N, :] += v
v_normed = v_normed[:, :N, :] / float(window_size)
v_normed = v - v_normed
if not on_gpu:
v_normed = v_normed.cpu()
return v_normed | 69f3c8cd6a5628d09a7fd78f3b5b779611a68411 | 17,760 |
import torch
def valid_collate_fn(data):
"""Build mini-batch tensors from a list of (image, caption) tuples.
Args:
data: list of (image, caption) tuple.
- image: torch tensor of shape (3, 256, 256).
- caption: torch tensor of shape (?); variable length.
Returns:
images: torch tensor of shape (batch_size, 3, 256, 256).
targets: torch tensor of shape (batch_size, padded_length).
lengths: list; valid length for each padded caption.
"""
# Sort a data list by caption length
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions, product_ids, query_ids, boxes = zip(*data)
# Merge images (convert tuple of 3D tensor to 4D tensor)
images_lengths = [image.shape[0] for image in images]
images_tensor = torch.zeros(len(images), max(images_lengths), 2048)
images_masks = torch.zeros(len(images), max(images_lengths))
boxes_tensor = torch.zeros(len(images), max(images_lengths), 4)
for i, image in enumerate(images):
end = images_lengths[i]
images_tensor[i, :end,:] = image[:,:]
images_masks[i, :end] = 1
boxes_tensor[i, :end-1,:] = boxes[i][:,:]
#images_tensor[i, :end,:] = image[:end,:]
#images = torch.stack(images, 0)
# Merget captions (convert tuple of 1D tensor to 2D tensor)
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
txt_masks = torch.zeros(len(captions), max(lengths))
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
txt_masks[i, :end] = 1
return images_tensor, targets, images_lengths, lengths, images_masks, txt_masks, product_ids, query_ids, boxes_tensor | 3eabc71d3ae68d4ca24d1a146be4fab8543c8566 | 17,761 |
def parse_statement(tokens):
"""
statement:
| 'while' statement_list 'do' statement_list 'end'
| 'while' statement_list 'do' 'end'
| 'if' if_body
| num_literal
| string_literal
| builtin
| identifier
"""
if tokens.consume_maybe("while"):
condition = parse_statement_list(tokens)
tokens.consume_only("do")
statement = WhileStatement(condition)
if tokens.consume_maybe("end"):
return statement
body = parse_statement_list(tokens)
statement.body = body
tokens.consume_only("end")
return statement
if tokens.consume_maybe("if"):
return parse_if_body(tokens)
token = tokens.consume()
return Statement(token) | dfa26e4b834104a85b7266628fb6635c86e64b09 | 17,762 |
def vector_to_pytree_fun(func):
"""Make a pytree -> pytree function from a vector -> vector function."""
def wrapper(state):
return func(Vector(state)).pytree
return wrapper | 79cc16c0e9187fc3bb944f40433ba1bd7850cb6e | 17,763 |
def _filter_contacts(people_filter, maillist_filter, qs, values):
"""Helper for filtering based on subclassed contacts.
Runs the filter on separately on each subclass (field defined by argument,
the same values are used), then filters the queryset to only keep items
that have matching.
"""
people = Person.objects.filter(**{people_filter + '__in': values})
mailing_lists = Maillist.objects.filter(**{maillist_filter + '__in': values})
return qs.filter(Q(contact__in=people) | Q(contact__in=mailing_lists)) | 704396156370596433e78be1bb7bf5b4a77f284d | 17,764 |
def viz_property(statement, properties):
"""Create properties for graphviz element"""
if not properties:
return statement + ";";
return statement + "[{}];".format(" ".join(properties)) | 518d4a662830737359a8b3cb9cec651394823785 | 17,765 |
def create_fsl_fnirt_nonlinear_reg(name='fsl_fnirt_nonlinear_reg'):
"""
Performs non-linear registration of an input file to a reference file
using FSL FNIRT.
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
nonlinear_register : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.input_skull : string (nifti file)
File of input brain with skull
inputspec.reference_skull : string (nifti file)
Target brain with skull to normalize to
inputspec.fnirt_config : string (fsl fnirt config file)
Configuration file containing parameters that can be specified in fnirt
Workflow Outputs::
outputspec.output_brain : string (nifti file)
Normalizion of input brain file
outputspec.nonlinear_xfm : string
Nonlinear field coefficients file of nonlinear transformation
Registration Procedure:
1. Perform a nonlinear registration on an input file to the reference file utilizing affine
transformation from the previous step as a starting point.
2. Invert the affine transformation to provide the user a transformation (affine only) from the
space of the reference file to the input file.
Workflow Graph:
.. image:: ../images/nonlinear_register.dot.png
:width: 500
Detailed Workflow Graph:
.. image:: ../images/nonlinear_register_detailed.dot.png
:width: 500
"""
nonlinear_register = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['input_brain',
'input_skull',
'reference_brain',
'reference_skull',
'interp',
'ref_mask',
'linear_aff',
'fnirt_config']),
name='inputspec')
outputspec = pe.Node(util.IdentityInterface(fields=['output_brain',
'nonlinear_xfm']),
name='outputspec')
nonlinear_reg = pe.Node(interface=fsl.FNIRT(),
name='nonlinear_reg_1')
nonlinear_reg.inputs.fieldcoeff_file = True
nonlinear_reg.inputs.jacobian_file = True
brain_warp = pe.Node(interface=fsl.ApplyWarp(),
name='brain_warp')
nonlinear_register.connect(inputspec, 'input_skull',
nonlinear_reg, 'in_file')
nonlinear_register.connect(inputspec, 'reference_skull',
nonlinear_reg, 'ref_file')
nonlinear_register.connect(inputspec, 'interp',
brain_warp, 'interp')
nonlinear_register.connect(inputspec, 'ref_mask',
nonlinear_reg, 'refmask_file')
# FNIRT parameters are specified by FSL config file
# ${FSLDIR}/etc/flirtsch/TI_2_MNI152_2mm.cnf (or user-specified)
nonlinear_register.connect(inputspec, 'fnirt_config',
nonlinear_reg, 'config_file')
nonlinear_register.connect(inputspec, 'linear_aff',
nonlinear_reg, 'affine_file')
nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
outputspec, 'nonlinear_xfm')
nonlinear_register.connect(inputspec, 'input_brain',
brain_warp, 'in_file')
nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
brain_warp, 'field_file')
nonlinear_register.connect(inputspec, 'reference_brain',
brain_warp, 'ref_file')
nonlinear_register.connect(brain_warp, 'out_file',
outputspec, 'output_brain')
return nonlinear_register | dcb723e8fc33df2c8cc167c2bedc72f802d124c5 | 17,766 |
def _subspace_plot(
inputs, output, *, input_names, output_name, scatter_args=None,
histogram_args=None, min_output=None, max_output=None
):
"""
Do actual plotting
"""
if scatter_args is None:
scatter_args = {}
if histogram_args is None:
histogram_args = {}
if min_output is None:
min_output = min(output)
if max_output is None:
max_output = max(output)
# see https://matplotlib.org/examples/pylab_examples/multi_image.html
_, num_inputs = inputs.shape
fig, axes, grid = _setup_axes(input_names=input_names)
if output_name is not None:
fig.suptitle(output_name)
norm = _Normalize(min_output, max_output)
hist_plots = []
for i in range(num_inputs):
hist_plots.append(_plot_hist(
inputs[:, i], axis=axes[i][i], **histogram_args
))
scatter_plots = []
scatter_plots_grid = []
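    # Fill only the lower triangle: one scatter panel per input pair, all sharing the output colour scale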
for y_index in range(num_inputs):
scatter_plots_grid.append([])
for x_index in range(y_index):
sc_plot = _plot_scatter(
x=inputs[:, x_index], y=inputs[:, y_index], z=output,
axis=axes[y_index][x_index], # check order
norm=norm, **scatter_args
)
scatter_plots.append(sc_plot)
scatter_plots_grid[y_index].append(sc_plot)
cbar_ax = fig.add_subplot(grid[0, 1:])
fig.colorbar(
scatter_plots[0], cax=cbar_ax, orientation='horizontal',
)
cbar_ax.set_aspect(1/20)
return fig | 720092b24f1675f4f4c64c206cd7cad8b3a6dee6 | 17,767 |
import os
import inspect
def preload_template(fname, comment='#'):
"""Preloads a template from a file relative to the calling Python file."""
comment = comment.strip()
if not os.path.isabs(fname):
previous_frame = inspect.currentframe().f_back
caller_fname, _, _, _, _ = inspect.getframeinfo(previous_frame)
fname = os.path.dirname(caller_fname) + os.sep + fname
with open(fname, 'r') as fil:
template = fil.read()
return annotate_block(template, fname, comment) | 054d89ae06f3aa3c93fe72e3788ca750bc8578d4 | 17,768 |
import platform
import subprocess
import re
def detect_windows_needs_driver(sd, print_reason=False):
"""detect if Windows user needs to install driver for a supported device"""
need_to_install_driver = False
if sd:
system = platform.system()
#print(f'in detect_windows_needs_driver system:{system}')
if system == "Windows":
# if windows, see if we can find a DeviceId with the vendor id
# Get-PnpDevice | Where-Object{ ($_.DeviceId -like '*10C4*')} | Format-List
command = 'powershell.exe "[Console]::OutputEncoding = [Text.UTF8Encoding]::UTF8; Get-PnpDevice | Where-Object{ ($_.DeviceId -like '
command += f"'*{sd.usb_vendor_id_in_hex.upper()}*'"
command += ')} | Format-List"'
#print(f'command:{command}')
_, sp_output = subprocess.getstatusoutput(command)
#print(f'sp_output:{sp_output}')
search = f'CM_PROB_FAILED_INSTALL'
#print(f'search:"{search}"')
if re.search(search, sp_output, re.MULTILINE):
need_to_install_driver = True
# if the want to see the reason
if print_reason:
print(sp_output)
return need_to_install_driver | 52e681152442b27944396bf35d6a7c80a39b8639 | 17,769 |
def install_microcode_filter(*args):
"""
install_microcode_filter(filter, install=True)
register/unregister non-standard microcode generator
@param filter: - microcode generator object (C++: microcode_filter_t
*)
@param install: - TRUE - register the object, FALSE - unregister (C++:
bool)
"""
return _ida_hexrays.install_microcode_filter(*args) | e51c4f5bcc749692bd69c0d9024e6e004da9e9ac | 17,770 |
def get_marvel_character_embed(attribution_text, result):
"""Parses a given JSON object that contains a result of a Marvel
character and turns it into an Embed
:param attribution_text: The attributions to give to Marvel for using the API
:param result: A JSON object of a Marvel API call result
:returns: A nice-looking Embed for discord users to look at
"""
return Embed(
title=result["name"],
description=result["description"],
colour=PRIMARY_EMBED_COLOR
).add_field(
name="Series",
value="\n".join([
" * `{}`".format(series["name"])
for series in result["series"]["items"]
])
).add_field(
name="Comics",
value="\n".join([
" * `{}`".format(comic["name"])
for comic in result["comics"]["items"]
])
).set_image(
url="{}.{}".format(
result["thumbnail"]["path"],
result["thumbnail"]["extension"]
)
).set_footer(
text=attribution_text
) | ed315c2581de86f539c7a08ff6c37e9a889f2670 | 17,771 |
def get_or_create_dfp_targeting_key(name, key_type='FREEFORM'):
"""
Get or create a custom targeting key by name.
Args:
name (str)
Returns:
an integer: the ID of the targeting key
"""
key_id = dfp.get_custom_targeting.get_key_id_by_name(name)
if key_id is None:
key_id = dfp.create_custom_targeting.create_targeting_key(name, key_type=key_type)
return key_id | 36211d5a383d54fde3e42d30d81778008343c676 | 17,772 |
def positive(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in positive")
return Array._new(np.positive(x._array)) | 2caa9b1714c549e390dba494a748aa86d5077d67 | 17,773 |
import csv
def read_loss_file(path):
"""Read the given loss csv file and process its data into lists that can be
plotted by matplotlib.
Args:
path (string): The path to the file to be read.
Returns: A list of lists, one list for each subnetwork containing the loss
values over time.
"""
with open(path, 'r') as csvfile:
reader = csv.reader(csvfile)
data = []
for row in reader:
# Ignore the epoch numbers
if len(data) == 0:
data = [[] for _ in row[1:]]
for i in range(1, len(row)):
data[i-1].append(float(row[i]))
return data | 8e861f0bf46db5085ea2f30a7e70a4bdfa0b9697 | 17,774 |
from typing import Union
def number2human(n: Union[int, float]) -> str:
"""
Format large number into readable string for a human
Examples:
>>> number2human(1000)
'1.0K'
>>> number2human(1200000)
'1.2M'
"""
# http://code.activestate.com/recipes/578019
# >>> bytes2human(10000)
# '9.8K'
# >>> bytes2human(100001221)
# '95.4M'
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = (10 ** 3) ** (i + 1)
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%.2f" % n | 26e99ca6b3cf51bf554018e1c97a1c8bebd51811 | 17,775 |
import numpy as np
from scipy import optimize, stats
def binomial_confidence_interval(successes, trials, error_rate):
"""Computes a confidence interval on the true p of a binomial.
Assumes:
- The given `successes` count outcomes of an iid Bernoulli trial
with unknown probability p, that was repeated `trials` times.
Guarantees:
- The probability (over the randomness of drawing the given sample)
that the true p is outside the returned interval is no more than
the given `error_rate`.
Args:
successes: Python or numpy `int` number of successes.
trials: Python or numpy `int` number of trials.
error_rate: Python `float` admissible rate of mistakes.
Returns:
low_p: Lower bound of confidence interval.
high_p: Upper bound of confidence interval.
Raises:
ValueError: If scipy is not available.
"""
def p_small_enough(p):
# This is positive iff p is smaller than the desired upper bound.
log_prob = stats.binom.logcdf(successes, trials, p)
return log_prob - np.log(error_rate / 2.)
def p_big_enough(p):
# This is positive iff p is larger than the desired lower bound.
# Scipy's survival function for discrete random variables excludes
# the argument, but I want it included for this purpose.
log_prob = stats.binom.logsf(successes-1, trials, p)
return log_prob - np.log(error_rate / 2.)
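  # Root-find the p at which each one-sided tail probability equals error_rate / 2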
if successes < trials:
high_p = optimize.brentq(
p_small_enough, successes / float(trials), 1., rtol=1e-9)
else:
high_p = 1.
if successes > 0:
low_p = optimize.brentq(
p_big_enough, 0., successes / float(trials), rtol=1e-9)
else:
low_p = 0.
return low_p, high_p | 03caf19e30a4b280c6b060b7ba4b1166b526feec | 17,776 |
def try_parse_func_decl(start, end):
"""Parse a function declarator between start and end.
Expects that tokens[end-1] is a close parenthesis. If a function
declarator is successfully parsed, returns the decl_node.Function
object. Otherwise, returns None.
"""
open_paren = find_pair_backward(end - 1)
try:
params, index = parse_parameter_list(open_paren + 1)
except ParserError as e:
log_error(e)
return None
if index == end - 1:
return decl_nodes.Function(
params, parse_declarator(start, open_paren)) | 1bcdce513fdaf6e28e034ba1578bd271a00c31a5 | 17,777 |
import contextlib
def eth_getBlockTransactionCountByNumber(block_number: int) -> int:
""" See EthereumAPI#get_block_transaction_count_by_number. """
with contextlib.closing(EthereumAPI()) as api:
return api.get_block_transaction_count_by_number(block_number) | d4fbd368bb49854ceee589ba20c956275ece95c6 | 17,778 |
from typing import List
from typing import Any
from typing import NamedTuple
from typing import Union
def day(db: Database, site: str = 'test', tag: str = '', search_body: str = '') -> List[Any]:
"""
    Returns a list of named tuples  # xxx typing this as List[DayCount] would need a class DayCount(NamedTuple); pypy…
"""
tag_where = ''
body_where = ''
param = [site] # type: List[Union[str, int]]
if tag != '':
tag_where = "AND (tags like ? or tags like ?)"
param.extend([f"% {tag} %", f"% {tag}:%"])
if search_body != '':
body_where = "AND body LIKE ?"
param.append(f"%{search_body}%")
if db.dbms == 'postgresql':
date = 'to_char(DATE("datetime"),\'YYYY-MM-DD\')'
else:
date = 'DATE("datetime")'
sql = f"""
SELECT
{date} as "date" ,
COUNT(*) as "count"
FROM basedata
WHERE site = ?
{tag_where}
{body_where}
GROUP BY DATE("datetime")
ORDER BY DATE("datetime") DESC
LIMIT ?
"""
    limit = 1000  # PENDING: add paging?
param.append(limit)
day_count = NamedTuple('day_count', (('date', str), ('count', int)))
logger.log(5, "日付投稿数SQL: %s", sql)
logger.log(5, "プレースホルダパラメータ: %s", param)
return db.execute_fetchall(sql, param, namedtuple=day_count) | c60a4a8aabc546a2dc7b412d73c1d0a97d7ccc25 | 17,779 |
import os
def xml_files_list(path):
"""
Return the XML files found in `path`
"""
return (f for f in os.listdir(path) if f.endswith(".xml")) | 27cc2769e34f55263c60ba07a92e181bfec641ab | 17,780 |
import math
def ppv2(
aim_stars=None, speed_stars=None, max_combo=None,
nsliders=None, ncircles=None, nobjects=None, base_ar=5.0,
base_od=5.0, mode=MODE_STD, mods=MODS_NOMOD, combo=None,
n300=None, n100=0, n50=0, nmiss=0, score_version=1, bmap=None
):
"""
calculates ppv2
returns (pp, aim_pp, speed_pp, acc_pp, acc_percent)
if bmap is provided, mode, base_ar, base_od, max_combo,
nsliders, ncircles and nobjects are taken from it. otherwise
they must be provided.
if combo is None, max_combo is used.
if n300 is None, max_combo - n100 - n50 - nmiss is used.
"""
if mode != MODE_STD:
info(
"ppv2 is only implemented for osu!std at the moment\n"
)
raise NotImplementedError
if bmap != None:
mode = bmap.mode
base_ar = bmap.ar
base_od = bmap.od
max_combo = bmap.max_combo()
nsliders = bmap.nsliders
ncircles = bmap.ncircles
nobjects = len(bmap.hitobjects)
else:
if aim_stars == None:
raise ValueError("missing aim_stars or bmap")
if speed_stars == None:
raise ValueError("missing speed_stars")
if max_combo == None:
raise ValueError("missing max_combo or bmap")
if nsliders == None:
raise ValueError("missing nsliders or bmap")
if ncircles == None:
raise ValueError("missing ncircles or bmap")
if nobjects == None:
raise ValueError("missing nobjects or bmap")
if max_combo <= 0:
info("W: max_combo <= 0, changing to 1\n")
max_combo = 1
if combo == None:
combo = max_combo - nmiss
if n300 == None:
n300 = nobjects - n100 - n50 - nmiss
# accuracy ----------------------------------------------------
accuracy = acc_calc(n300, n100, n50, nmiss)
real_acc = accuracy
if score_version == 1:
# scorev1 ignores sliders since they are free 300s
# for whatever reason it also ignores spinners
nspinners = nobjects - nsliders - ncircles
real_acc = acc_calc(
n300 - nsliders - nspinners, n100, n50, nmiss
)
# can go negative if we miss everything
real_acc = max(0.0, real_acc)
elif score_version == 2:
ncircles = nobjects
else:
info("unsupported scorev%d\n" % (score_version))
raise NotImplementedError
# global values -----------------------------------------------
def low_objects(stars):
multiplier = min(0.5, 0.59 + (-0.59 * math.exp(-0.0038 * nobjects)))
multiplier = min(0.95 + min(0.1, nobjects / 5000),
0.55 + multiplier + max(0, 0.4 - pp_base(stars) / 12.5))
def bonus(n):
if n <= 500:
return multiplier
elif n <= 2000:
return bonus(500) + 0.3 * min(1, (n-500) / 1500)
elif n > 2000:
return bonus(2000) + 0.5 * math.log10(n / 2000)
return bonus(nobjects)
miss_penality = pow(0.97, nmiss)
combo_break = pow(combo, 0.8) / pow(max_combo, 0.8)
# calculate stats with mods
speed_mul, ar, od, _, _ = (
mods_apply(mods, ar=base_ar, od=base_od)
)
# ar bonus ----------------------------------------------------
ar_bonus = 1.0
if ar > 10.33:
ar_bonus += 0.45 * (ar - 10.33)
elif ar < 8.0:
low_ar_bonus = 0.01 * (8.0 - ar)
if mods & MODS_HD != 0:
low_ar_bonus *= 2.0
ar_bonus += low_ar_bonus
# aim pp ------------------------------------------------------
aim = pp_base(aim_stars)
aim *= low_objects(aim_stars)
aim *= miss_penality
aim *= combo_break
aim *= ar_bonus
if mods & MODS_HD != 0:
aim *= 1.02 + (11 - ar) / 50
if mods & MODS_FL != 0:
aim *= max(1, 1.45 * low_objects(aim_stars))
acc_bonus = 0.5 + accuracy / 2.0
od_bonus = 0.98 + (od * od) / 2500.0
aim *= acc_bonus
aim *= od_bonus
# speed pp ----------------------------------------------------
speed = pp_base(speed_stars)
speed *= low_objects(speed_stars)
speed *= miss_penality
speed *= combo_break
speed *= acc_bonus
speed *= od_bonus
if mods & MODS_HD != 0:
speed *= 1.18
# acc pp ------------------------------------------------------
acc = pow(1.52163, od) * pow(real_acc, 24.0) * 2.83
# length bonus (not the same as speed/aim length bonus)
acc *= min(1.15, pow(ncircles / 1000.0, 0.3))
if mods & MODS_HD != 0:
acc *= 1.02
if mods & MODS_FL != 0:
acc *= 1.02
# total pp ----------------------------------------------------
final_multiplier = 1.12
if mods & MODS_NF != 0:
final_multiplier *= 0.90
if mods & MODS_SO != 0:
final_multiplier *= 0.95
total = (
pow(
pow(aim, 1.1) + pow(speed, 1.1) + pow(acc, 1.1),
1.0 / 1.1
) * final_multiplier
)
return (total, aim, speed, acc, accuracy * 100.0) | c4cc793b7eb2acc45ca83762f946a476452a5e34 | 17,781 |
import os
import cv2
import numpy as np
def frame_shows_car(base_dir, frame, data_dir):
"""Return True if frame shows car. """
sem_seg = cv2.imread(os.path.join(base_dir, "semantic_segmentation/semantic_segmentation" + str(frame) + ".png"),
-1)
class_id_dict = pre_processing.get_dict_from_file(data_dir, "class_id_legend.txt")
return int(class_id_dict['car']) in np.unique(sem_seg) | 0759b556f6b985a7aca8384d7071a38ee387e190 | 17,782 |
def p_portail_home(request):
""" Portail d'accueil de CRUDY """
crudy = Crudy(request, "portail")
title = crudy.application["title"]
crudy.folder_id = None
crudy.layout = "portail"
return render(request, 'p_portail_home.html', locals()) | a883cafd84b1ce24ead37ebbe6c0e3a15ea476c6 | 17,783 |
import os
def generate_image_list(dir_path, max_dataset_size=float("inf")):
"""
Traverse the directory to generate a list of images path.
Args:
dir_path (str): image directory.
max_dataset_size (int): Maximum number of return image paths.
Returns:
Image path list.
"""
images = []
assert os.path.isdir(dir_path), '%s is not a valid directory' % dir_path
for root, _, fnames in sorted(os.walk(dir_path)):
for fname in fnames:
if is_image(fname):
path = os.path.join(root, fname)
images.append(path)
print("len(images):", len(images))
return images[:min(max_dataset_size, len(images))] | b92efed79dd9c0904c01d4ffa21ec30f7da57bd7 | 17,784 |
import numpy as np
def A_norm(freqs, eta):
"""Calculates the constant scaling factor A_0
Parameters
----------
freqs : array
The frequencies in Natural units (Mf, G=c=1) of the waveform
eta : float
The reduced mass ratio
"""
const = np.sqrt(2*eta/3/np.pi**(1/3))
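    # Leading-order inspiral amplitude scales with frequency as f^(-7/6)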
return const*freqs**-(7/6) | 74947e34efd7b6b0bb31aac35c9932623d4a28aa | 17,785 |
from typing import IO
from typing import Counter
from operator import sub
def task1(input_io: IO) -> int:
"""
Solve task 1.
Parameters
----------
input_io: IO
        Day 10 input: stream of adapter joltages.
Return
------
int
        number of differences of 1 times number of differences of 3.
"""
numbers = list(read_numbers(input_io))
numbers.append(0)
numbers.sort()
counter = Counter(map(sub, numbers[1:], numbers[:-1]))
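    # Gaps between consecutive adapters; the device's built-in adapter adds one more 3-jolt gap (hence counter[3] + 1)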
return counter[1] * (counter[3] + 1) | b566a79013f442b7216118458958212186e57f07 | 17,786 |
import json
import logging
import logging.config
import sys
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open( config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger | 511623057b7e98adbaea628d495afee4c217fcb1 | 17,787 |
def get_total_balance(view_currency='BTC') -> float:
"""
Shows total balance for account in chosen currency
:param view_currency: currency for total balance
:return: total balance amount for account
"""
result = pay.get_balance()
balance_dict = result.get('balance')
total = 0
for currency in balance_dict:
total += ((balance_dict.get(currency).get(view_currency).get('total')) +
(balance_dict.get(currency).get(view_currency).get('reserved')))
return total | 9e595eceac7df63779cd8c8e6d155092ec76a36e | 17,788 |
import binascii
def bin_hex(binary):
"""
Convert bytes32 to string
Parameters
----------
    binary: bytes object
Returns
-------
str
"""
return binascii.hexlify(binary).decode('utf-8') | 41f9c8a498aa3628f64cf59c93896f42d8dfd56a | 17,789 |
def build_format(name: str, pattern: str, label: bool) -> str:
"""Create snippet format.
:param name: Instruction name
    :param pattern: Instruction regex pattern
    :param label: Keep the numeric operand as a placeholder when True; otherwise substitute a literal 100
"""
snip: str = f"{name:7s}" + pattern.format(**SNIPPET_REPLACEMENTS)
snip = snip.replace("(", "")
snip = snip.replace(")", "")
snip = snip.replace("number?\\\\$reg\\", "number(\\$reg)")
snip = snip.replace("\\$", "")
replace_ct = 1
reg_ct = snip.count("reg")
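    # Substitute each "reg" placeholder with the next register name from REG_ARGS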
for i in range(0, reg_ct):
f = f"${REG_ARGS[i]}"
snip = snip.replace("reg", f, 1)
replace_ct += 1
if not label:
snip = snip.replace("number", "100")
replace_ct += 1
return snip | ec25ecf4f2d46db398c389b479620e0cbcf30ee2 | 17,790 |
import torch
def make_observation_mapper(claims):
"""Make a dictionary of observation.
Parameters
----------
claims: pd.DataFrame
Returns
-------
observation_mapper: dict
an dictionary that map rv to their observed value
"""
observation_mapper = dict()
for c in claims.index:
s = claims.iloc[c]['source_id']
observation_mapper[f'b_{s}_{c}'] = torch.tensor(
claims.iloc[c]['value'])
return observation_mapper | 43052bd9ce5e1121f3ed144ec48acf20ad117313 | 17,791 |
def toCSV(
dataset, # type: BasicDataset
showHeaders=True, # type: Optional[bool]
forExport=False, # type: Optional[bool]
localized=False, # type: Optional[bool]
):
# type: (...) -> String
"""Formats the contents of a dataset as CSV (comma separated
values), returning the resulting CSV as a string.
If the "forExport" flag is set, then the format will be appropriate
for parsing using the `fromCSV` function.
Args:
dataset: The dataset to export to CSV.
showHeaders: If set to True, a header row will be present in
the CSV. Default is True. Optional.
forExport: If set to True, extra header information will be
present in the CSV data which is necessary for the CSV to be
compatible with the fromCSV method. Overrides showHeaders.
Default is False. Optional.
localized: If set to True, the string representations of the
values in the CSV data will be localized. Default is
False. Optional.
Returns:
The CSV data as a string.
"""
print(dataset, showHeaders, forExport, localized)
return "" | 9d998891a9712f42af8744513c1f61540eee0e2e | 17,792 |
def add_record(session, data):
"""
session -
data - dictionary {"site":"Warsaw"}
"""
skeleton = Skeleton()
skeleton.site = data["site"]
skeleton.location = data["location"]
skeleton.skeleton = data["skeleton"]
skeleton.observer = data["observer"]
skeleton.obs_date = data["obs_date"]
session.add(skeleton)
session.commit()
return skeleton.skeleton_id | f91df4459b37b7df4d313fd01323451bf897a754 | 17,793 |
def hue_angle(C):
"""
Returns the *hue* angle :math:`h` in degrees from given colour difference
signals :math:`C`.
Parameters
----------
C : array_like
Colour difference signals :math:`C`.
Returns
-------
numeric or ndarray
*Hue* angle :math:`h` in degrees.
Examples
--------
>>> C = np.array([
... -5.365865581996587e-05,
... -0.000571699383647,
... 0.000625358039467
... ])
>>> hue_angle(C) # doctest: +ELLIPSIS
269.2737594...
"""
C_1, C_2, C_3 = tsplit(C)
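    # Hue angle from the opponent colour-difference signals, folded into [0, 360) degrees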
hue = (180 * np.arctan2(0.5 * (C_2 - C_3) / 4.5, C_1 -
(C_2 / 11)) / np.pi) % 360
return hue | 599f594eff92280df06a4c6ef88ccf286f146475 | 17,794 |
def get_submodel_list_copasi(model_name: str,
model_info: pd.DataFrame):
"""
This function loads a list of Copasi model files, which all belong to the
same benchmark model, if a string with the id of the benchmark model id is
provided.
It also extracts the respective sbml files from the list and returns them
with the models, if any postprecessing of the Copasi results is necessary
"""
# get information about the model from the tsv table
model_rows = model_info.loc[model_info['short_id'] == model_name]
# only take accepted models
model_rows = model_rows[model_rows['accepted']]
submodel_paths = [path for path in model_rows['copasi_path_final']]
# collect the submodels
copasi_file_list = []
sbml_model_list = []
for submodel_path in submodel_paths:
copasi_file, sbml_model = get_submodel_copasi(submodel_path, model_info)
if copasi_file is not None:
copasi_file_list.append(copasi_file)
sbml_model_list.append(sbml_model)
return copasi_file_list, sbml_model_list | ea889e5ea836131d8febc94dd69806b2acf47559 | 17,795 |
def GetNextBmask(enum_id, value):
"""
Get next bitmask in the enum (bitfield)
@param enum_id: id of enum
@param value: value of the current bitmask
@return: value of a bitmask with value higher than the specified
value. -1 if no such bitmasks exist.
All bitmasks are sorted by their values
as unsigned longs.
"""
return idaapi.get_next_bmask(enum_id, value) | d2c415e1a3ad63c651dc2df771dbe43a082613d9 | 17,796 |
def annotate_link(domain):
"""This function is called by the url tag. Override to disable or change behaviour.
domain -- Domain parsed from url
"""
return u" [%s]"%_escape(domain) | 26b5c8979cc8cd7f581a7ff889a907cf71844c72 | 17,797 |
import numpy as np
import numpy.linalg as nla
def kmeans(data, k, num_iterations, num_inits=10, verbose=False):
"""Execute the k-means algorithm for
determining the best k clusters of data
points in a dataset.
Parameters
----------
data : ndarray, (n,d)
n data points in R^d.
k : int
The number of clusters to separate
the data into.
num_iterations : int
The number of iterations of the k-means
algorithm to execute.
num_inits : int, optional
Number of random initializations to try.
Returns the best result.
verbose : bool, optional
Specifies whether to print info about
the execution of the algorithm.
Return
------
    (clusters, data_point_assignment, centroids)
The results of the k-means algorithm. Clusters
is a list of the clusters (which are lists of ints).
    data_point_assignment is a (n,) numpy array of ints
that indicates which cluster a data point has been
assigned to. And centroids is (k,d) numpy array
specifying the cluster centers.
"""
# Number of data points
num_data_points = int(data.shape[0])
# Spatial dimension d
d = int(data.shape[1])
best_results = None
best_total_distance = np.inf
for init in range(num_inits):
# Map from data point index to cluster index.
data_point_assignment = np.zeros(num_data_points, dtype=int)
# list of data points in clusters
clusters = [[]] * k
# Initialize the centroids
# using k-randomly sampled points.
centroids = np.zeros((d,k))
for ind_cluster in range(k):
inds_data = np.random.choice(num_data_points, k)
centroid = np.mean(data[inds_data, :], axis=0)
centroids[:, ind_cluster] = centroid
for iteration in range(num_iterations):
if verbose:
print('==== Iteration {}/{} ===='.format(iteration+1, num_iterations))
print('centroids = {}'.format(centroids))
clusters = []
for ind_c in range(k):
clusters.append([])
# Assignment step:
# Assign each data point to the
# cluster with nearest centroid.
total_distance = 0.0
for ind_point in range(num_data_points):
distances = np.array([nla.norm(data[ind_point, :] - centroids[:, ind_c]) for ind_c in range(k)])
ind_cluster = np.argmin(distances)
total_distance += distances[ind_cluster]
data_point_assignment[ind_point] = ind_cluster
clusters[ind_cluster].append(ind_point)
# Update step:
# Update the centroids of the
# new clusters.
for ind_cluster in range(k):
cluster = clusters[ind_cluster]
cluster_data = np.array([data[ind_point, :] for ind_point in cluster])
centroid = np.mean(cluster_data, axis=0)
centroids[:, ind_cluster] = centroid
if total_distance < best_total_distance:
best_total_distance = total_distance
best_results = (clusters, data_point_assignment, centroids)
return best_results | 3cc3681ac0d0306fc7dce2da5757e6c162f7c457 | 17,798 |
def point_on_bezier_curve(cpw, n, u):
"""
Compute point on Bezier curve.
:param ndarray cpw: Control points.
:param int n: Degree.
:param u: Parametric point (0 <= u <= 1).
:return: Point on Bezier curve.
:rtype: ndarray
*Reference:* Algorithm A1.4 from "The NURBS Book".
"""
bernstein = all_bernstein(n, u)
pnt = zeros(4, dtype=float64)
for k in range(0, n + 1):
pnt += bernstein[k] * cpw[k]
return pnt | 3e4a494ff9ffabf6ad0d2711beba0e55647e7071 | 17,799 |