content | sha1 | id
---|---|---|
stringlengths 35–762k | stringlengths 40–40 | int64 0–3.66M
import numpy as np
import torchvision.transforms as T
from PIL import Image
def detect_on_image(img_path, img_is_path=False, threshold=0.5, rect_th=1, text_th=1, text_size=1):
"""
img_path: absolute path to an image, or an RGB image tensor.
threshold: minimum confidence score required to keep a prediction.
img_is_path: True if img_path is an absolute path, False if it is an RGB image tensor.
"""
if img_is_path:
img = Image.open(img_path).convert("RGB")
else:
img = img_path
img = np.array(img)
#pointer to transformation function
#after transforming into pytorch tensor, puts it into composition
transform = T.Compose([T.ToTensor()])
#applies transformations and sends the image to the GPU ('cuda')
#forward pass, gets network output
pred = model([transform(img).cuda()])
#accesses the network prediction scores, detaches it, brings it to CPU and converts it into np array
pred_scores = list(pred[0]['scores'].detach().cpu().numpy())
#list of indices of every score above threshold
pred_t_list = [pred_scores.index(x) for x in pred_scores if x > threshold]
#index of the worst acceptable prediction score
if len(pred_t_list) == 0:
return None, None
pred_t = pred_t_list[-1]
masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
#gets the coco categories names of labels
pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].cpu().numpy())]
#list of tuples with x and y coordinates for boxes to be drawn
pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().cpu().numpy())]
#BUG: what if the worst is the last
masks = masks[:pred_t+1]
pred_boxes = pred_boxes[:pred_t+1]
pred_class = pred_class[:pred_t+1]
#RETURNED THIS::: masks, pred_boxes, pred_class, pred_scores[:pred_t+1]
pred_scores = pred_scores[:pred_t+1]
# for i in range(len(masks)):
# #rgb_mask = random_colour_masks(masks[i])
# if len(masks[i].shape) < 2:
# continue
# rgb_mask = get_coco_category_color_mask(masks[i], pred_class[i])
# img = cv2.addWeighted(img, 1, rgb_mask, 0.5, 0)
# img = cv2.rectangle(img, pred_boxes[i][0], pred_boxes[i][1], color=(0, 255, 0), thickness=rect_th)
# img = cv2.putText(img, f"{pred_class[i]}: {pred_scores[i]:.2f} >= {threshold:.2f}",
# pred_boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX,
# text_size, (0, 255, 0), thickness=text_th)
person_pred_boxes = []
for idx, box in enumerate(pred_boxes):
if pred_class[idx] == 'person' and pred_scores[idx] >= threshold:
person_pred_boxes.append(box)
return person_pred_boxes | 8741e5519b4177bd3471912ce7fcd19d2fb829b7 | 13,400 |
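A minimal usage sketch for detect_on_image above (not part of the original snippet): it assumes a torchvision Mask R-CNN is used as the module-level `model`, that CUDA is available, and that COCO_INSTANCE_CATEGORY_NAMES is defined in the same module; the image path is hypothetical.

import torchvision

# load a pretrained instance-segmentation model as the module-level `model`
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model = model.cuda().eval()

person_boxes = detect_on_image("/path/to/street_scene.jpg", img_is_path=True, threshold=0.7)
print(person_boxes)  # list of ((x1, y1), (x2, y2)) boxes for detected people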
import random
def base_hillclimb(base_sol: tuple, neighbor_method: str, max_fevals: int, searchspace: Searchspace, all_results, kernel_options, tuning_options, runner, restart=True, randomize=True, order=None):
""" Hillclimbing search until max_fevals is reached or no improvement is found
Base hillclimber that evaluates neighbouring solutions in a random or fixed order
and possibly immediately moves to the neighbour if it is an improvement.
:param base_sol: Starting position for hillclimbing
:type base_sol: list
:param neighbor_method: Method to use to select neighboring parameter configurations to visit
during hillclimbing; "Hamming", "strictly-adjacent" and "adjacent" are supported.
:type neighbor_method: string
:param max_fevals: Maximum number of unique function evaluations that is allowed
during the search.
:type max_fevals: int
:param searchspace: The searchspace object.
:type searchspace: Searchspace
:param all_results: List of dictionaries with all benchmarked configurations
:type all_results: list(dict)
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param restart: Boolean that controls whether to greedily restart hillclimbing
from a new position as soon as an improved position is found. True by default.
:type restart: bool
:param randomize: Boolean that controls whether the dimensions of the tunable
parameters are evaluated in randomized order.
:type randomize: bool
:param order: Fixed order in which the dimensions of the tunable parameters are
to be evaluated by the hillclimber.
:type order: list
:returns: The final position that was reached when hillclimbing halted.
:rtype: list
"""
if randomize and order:
raise ValueError("Using a preset order and randomize at the same time is not supported.")
tune_params = tuning_options.tune_params
# measure start point score
best_score = _cost_func(base_sol, kernel_options, tuning_options, runner, all_results, check_restrictions=False)
found_improved = True
while found_improved:
child = list(base_sol[:])
found_improved = False
current_results = []
vals = list(tune_params.values())
if order is None:
indices = list(range(len(vals)))
else:
indices = order
if randomize:
random.shuffle(indices)
# in each dimension see the possible values
for index in indices:
neighbors = searchspace.get_param_neighbors(tuple(child), index, neighbor_method, randomize)
# for each value in this dimension
for val in neighbors:
orig_val = child[index]
child[index] = val
# get score for this position
score = _cost_func(child, kernel_options, tuning_options, runner, current_results, check_restrictions=False)
# generalize this to other tuning objectives
if score < best_score:
best_score = score
base_sol = child[:]
found_improved = True
if restart:
break
else:
child[index] = orig_val
fevals = len(tuning_options.unique_results)
if fevals >= max_fevals:
all_results += current_results
return base_sol
if found_improved and restart:
break
# append current_results to all_results
all_results += current_results
return base_sol | 4007f66d14d52620b7917fb45a7701a8ec2ae96f | 13,401 |
from datetime import timedelta
def filter_dates(dates):
"""Filter out dates that lie close (within a few consecutive days) to an earlier date in the sorted list."""
j = 0
while j < len(dates):
date = dates[j]
i = 3
j += 1
while True:
date += timedelta(days=1)
if date in dates:
i += 1
else:
if i > 2:
del dates[j:j+i-1]
break
return dates | 447f2e082672c8f37918fce02863bad1f141854b | 13,402 |
import numpy as np
def biweight_location(a, c=6.0, M=None, axis=None, eps=1e-8):
"""
Copyright (c) 2011-2016, Astropy Developers
Compute the biweight location for an array.
Returns the biweight location for the array elements.
The biweight is a robust statistic for determining the central
location of a distribution.
The biweight location is given by the following equation
.. math::
C_{bl}= M+\\frac{\Sigma_{\|u_i\|<1} (x_i-M)(1-u_i^2)^2}
{\Sigma_{\|u_i\|<1} (1-u_i^2)^2}
where M is the sample mean or if run iterative the initial guess,
and u_i is given by
.. math::
u_{i} = \\frac{(x_i-M)}{cMAD}
where MAD is the median absolute deviation.
For more details, see Beers, Flynn, and Gebhardt, 1990, AJ, 100, 32B
Parameters
----------
a : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator. Default value is 6.0.
M : float, optional
Initial guess for the biweight location.
axis : tuple, optional
Tuple of the integer axis values to calculate over. Should be sorted.
Returns
-------
biweight_location : float
Returns the biweight location for the array elements.
Examples
--------
This will generate random variates from a Gaussian distribution and return
the biweight location of the distribution::
>>> from utils import biweight_location
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> cbl = biweight_location(randvar)
See Also
--------
median_absolute_deviation, biweight_midvariance
Note
--------
Copy of the astropy function with the "axis" argument added appropriately.
"""
if M is None:
if isinstance(a, np.ma.MaskedArray):
func = np.ma.median
else:
a = np.array(a, copy=False)
func = np.median
M = func(a, axis=axis)
else:
a = np.array(a, copy=False)
N = M*1.
# set up the difference
if axis is not None:
for i in axis:
N = np.expand_dims(N, axis=i)
d = a - N
# set up the weighting
if axis is not None:
MAD = median_absolute_deviation(a, axis=axis)
for i in axis:
MAD = np.expand_dims(MAD, axis=i)
else:
MAD = median_absolute_deviation(a)
u = np.where(MAD < eps, 0., d / c / MAD)
# now remove the outlier points
if isinstance(a, np.ma.MaskedArray):
mask = (np.abs(u) < 1).astype(int) * (1-a.mask.astype(int))
else:
mask = (np.abs(u) < 1).astype(int)
u = (1 - u ** 2) ** 2
return M + (d * u * mask).sum(axis=axis) / (u * mask).sum(axis=axis) | 4743b85f01f0d655a22f3b6037aaababcd375c7f | 13,403 |
import os
import shutil
def update_copy(src, dest):
"""
Possibly copy `src` to `dest`. No copy is made unless `src` exists.
The copy happens if `dest` does not exist, or if the mtime of `dest`
is older than that of `src`.
Returns: None
"""
if os.path.exists(src):
if (not os.path.exists(dest) or
os.path.getmtime(dest) < os.path.getmtime(src)):
shutil.copy(src, dest)
return None | 4f83e633d9348cf8273309707713060e5611c277 | 13,404 |
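A short usage sketch for update_copy above; the file names are purely illustrative.

# Copy a freshly generated report into a publish directory only when needed.
update_copy("build/report.html", "public/report.html")
# Calling it again immediately is a no-op, since dest is now at least as new as src.
update_copy("build/report.html", "public/report.html")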
from keras.models import Sequential
from keras.layers import Convolution3D, BatchNormalization, Activation, Flatten, Dense, Dropout
from keras.utils import multi_gpu_model
def model_21(GPUS = 1):
""" one dense: 3000 """
model = Sequential()
model.add(Convolution3D(60, kernel_size = (3, 3, 3), strides = (1, 1, 1), input_shape = (9, 9, 9, 20))) # 60 filters, kernel_size is the moving 3-D window, input shape is (9, 9, 9, 20)
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Convolution3D(60, (3, 3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Convolution3D(60, (3, 3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Flatten()) # now our layers have been combined to one
model.add(Dense(3000)) # 3000 nodes in the last hidden layer
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(20, activation = 'softmax')) # output layer has 20 possible classes (amino acids 0 - 19)
if GPUS >= 2:
model = multi_gpu_model(model, gpus=GPUS)
return model | 990b1f1c8b1271d44cb371733f7cf17c7f288997 | 13,405 |
import datetime
import pandas as pd
def strWeekday(
date: str,
target: int,
after: bool = False,
) -> str:
"""
Given an ISO string `date`, return the nearest `target` weekday.
**Parameters**
- `date`: The date around which the caller would like target searched.
- `target`: Weekday number as in the `datetime` Standard Library Module.
- `after`: If True, search for the target weekday after `date`; otherwise before it.
**Returns**
The ISO YYYY-MM-DD string representation of the nearest given weekday.
"""
dtdate = pd.to_datetime(date)
if datetime.datetime.weekday(dtdate) != target:
if not after:
date = str(dtdate - pd.offsets.Week(weekday=target)).split(" ")[0]
else:
date = str(dtdate + pd.offsets.Week(weekday=target)).split(" ")[0]
return date | d3511212cbbe8935b7acc7e63afff5b454aa039e | 13,406 |
import numpy as np
def combine_bincounts_kernelweights(
xcounts, ycounts, gridsize, colx, coly, L, lenkernel, kernelweights, mid, binwidth
):
"""
This function combines the bin counts (xcounts) and bin averages (ycounts) with
kernel weights via a series of direct convolutions. As a result, binned
approximations to X'W X and X'W y, denoted by weigthedx and weigthedy, are computed.
Recall that the local polynomial curve estimator beta_ and its derivatives are
minimizers to a locally weighted least-squares problem. At each grid
point g = 1,..., M in the grid, beta_ is computed as the solution to the
linear matrix equation:
X'W X * beta_ = X'W y,
where W are kernel weights approximated by the Gaussian density function.
X'W X and X'W y are approximated by weigthedx and weigthedy,
which are the result of a direct convolution of bin counts (xcounts) and kernel
weights, and bin averages (ycounts) and kernel weights, respectively.
The terms "kernel" and "kernel function" are used interchangeably
throughout.
For more information see the documentation of the main function locpoly
under KernReg.locpoly.
Parameters
----------
xcounts: np.ndarray
1-D array of binned x-values ("bin counts") of length gridsize.
ycounts: np.ndarray
1-D array of binned y-values ("bin averages") of length gridsize.
gridsize: int
Number of equally-spaced grid points.
colx: int
Number of columns of output array weigthedx, i.e. the binned approximation to X'W X.
coly: int
Number of columns of output array weigthedy, i.e. the binned approximation to X'W y.
lenkernel: int
Length of 1-D array kernelweights.
kernelweights: np.ndarray
1-D array of length lenkernel containing
approximated weights for the Gaussian kernel
(W in the notation above).
L: int
Parameter defining the number of times the kernel function
has to be evaluated.
Note that L < N, where N is the total number of observations.
mid: int
Midpoint of kernelweights.
binwidth: float
Bin width.
Returns
-------
weigthedx: np.ndarray
Dimensions (M, colx). Binned approximation to X'W X.
weigthedy: np.ndarray
Dimensions (M, coly). Binned approximation to X'W y.
"""
weigthedx = np.zeros((gridsize, colx))
weigthedy = np.zeros((gridsize, coly))
for g in range(gridsize):
if xcounts[g] != 0:
for i in range(max(0, g - L - 1), min(gridsize, g + L)):
if 0 <= i <= gridsize - 1 and 0 <= g - i + mid - 1 <= lenkernel - 1:
fac_ = 1
weigthedx[i, 0] += xcounts[g] * kernelweights[g - i + mid - 1]
weigthedy[i, 0] += ycounts[g] * kernelweights[g - i + mid - 1]
for j in range(1, colx):
fac_ = fac_ * binwidth * (g - i)
weigthedx[i, j] += (
xcounts[g] * kernelweights[g - i + mid - 1] * fac_
)
if j < coly:
weigthedy[i, j] += (
ycounts[g] * kernelweights[g - i + mid - 1] * fac_
)
return weigthedx, weigthedy | b283d3dd19720e7d5074a39866cb4cf5d55376d8 | 13,407 |
def get_icon_for_group(group):
"""Get the icon for an AOVGroup."""
# Group has a custom icon path, so use it.
if group.icon is not None:
return QtGui.QIcon(group.icon)
if isinstance(group, IntrinsicAOVGroup):
return QtGui.QIcon(":ht/rsc/icons/aovs/intrinsic_group.png")
return QtGui.QIcon(":ht/rsc/icons/aovs/group.png") | 8e6ea6f22901bc715a7b6ce02c68ca633bf9fe00 | 13,408 |
def unix_to_windows_path(path_to_convert, drive_letter='C'):
"""
For a string representing a POSIX compatible path (usually
starting with either '~' or '/'), returns a string representing an
equivalent Windows compatible path together with a drive letter.
Parameters
----------
path_to_convert : string
A string representing a POSIX path
drive_letter : string (Default : 'C')
A single character string representing the desired drive letter
Returns
-------
string
A string representing a Windows compatible path.
"""
if path_to_convert.startswith('~'):
path_to_convert = path_to_convert[1:]
if path_to_convert.startswith('/'):
path_to_convert = path_to_convert[1:]
path_to_convert = '{}{}{}'.format(drive_letter,
':\\',
path_to_convert).replace('/', '\\')
return path_to_convert | d3c23e2c19be4b81be135ae84760430be852da41 | 13,409 |
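Two illustrative calls to unix_to_windows_path above (the paths are made up):

print(unix_to_windows_path("~/projects/data/file.txt"))
# -> C:\projects\data\file.txt
print(unix_to_windows_path("/var/log/app.log", drive_letter="D"))
# -> D:\var\log\app.log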
def recordview_create_values(
coll_id="testcoll", view_id="testview", update="RecordView", view_uri=None,
view_entity_type="annal:Test_default",
num_fields=4, field3_placement="small:0,12",
extra_field=None, extra_field_uri=None
):
"""
Entity values used when creating a record view entity
"""
view_values = (
{ 'annal:type': "annal:View"
, 'rdfs:label': "%s %s/%s"%(update, coll_id, view_id)
, 'rdfs:comment': "%s help for %s in collection %s"%(update, view_id, coll_id)
, 'annal:view_entity_type': view_entity_type
, 'annal:open_view': True
, 'annal:view_fields':
[ { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_id"
, 'annal:field_placement': "small:0,12;medium:0,6"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_type"
, 'annal:field_placement': "small:0,12;medium:6,6"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_label"
, 'annal:field_placement': "small:0,12"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_comment"
# , 'annal:field_placement': field3_placement
}
]
})
if view_uri:
view_values['annal:uri'] = view_uri
if field3_placement:
view_values['annal:view_fields'][3]['annal:field_placement'] = field3_placement
if extra_field:
efd = (
{ 'annal:field_id': extra_field
, 'annal:field_placement': "small:0,12"
})
if extra_field_uri:
efd['annal:property_uri'] = extra_field_uri
view_values['annal:view_fields'].append(efd)
if num_fields == 0:
view_values['annal:view_fields'] = []
return view_values | a4b057acefd8f3e7c35b8412f0f0986d0440ab7a | 13,410 |
import math
import numpy as np
def calculateZ(f, t2, a0, a1, a2=0, a3=0):
""" given the frequency array and the filter coefficients,
return Z(s) as a np.array()
"""
s = np.array(f)*2*math.pi*1j
z = (1 + s*t2)/(s*(a3*s**3 + a2*s**2 + a1*s + a0))
return z | 56b2d349d3c279006c85a9dc9b8742395f1a6114 | 13,411 |
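A small sketch exercising calculateZ above; the frequency grid and the filter coefficients are illustrative values only.

import numpy as np

f = np.logspace(3, 7, num=5)          # 1 kHz to 10 MHz
z = calculateZ(f, t2=1e-6, a0=1.0, a1=2e-7)
print(np.abs(z))                      # impedance magnitude at each frequency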
def get_team_project_default_permissions(team, project):
"""
Return team role for given project.
"""
perms = get_perms(team, project)
return get_role(perms, project) or "" | e6c41a1cc56c7ae3e51950508fbc1c514b6ebf7d | 13,412 |
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData) | 36d0387114d548d57f41351355c1fe5d948f70e3 | 13,413 |
import tensorflow as tf
def simple_satunet(
input_shape,
kernel=(2, 2),
num_classes=1,
activation="relu",
use_batch_norm=True,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
filters=8,
num_layers=4,
strides=(1, 1),
):
"""
Customisable UNet architecture (Ronneberger et al. 2015 https://arxiv.org/abs/1505.04597)
input_shape: shape (x, y, num_channels)
num_classes (int): 1 for binary segmentation
activation (str): A keras.activations.Activation to use. ReLu by default.
use_batch_norm (bool): Whether to use Batch Normalisation across the channel axis between convolutions
dropout (float , 0. and 1.): dropout after the first convolutional block. 0. = no dropout
dropout_change_per_layer (float , 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
num_layers (int): Number of total layers in the encoder not including the bottleneck layer
"""
upconv_filters = int(1.5 * filters)
# Build U-Net model
inputs = tf.keras.layers.Input(input_shape)
x = inputs
down_layers = []
for l in range(num_layers):
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
down_layers.append(x)
x = tf.keras.layers.MaxPooling2D(kernel)(x)
dropout += dropout_change_per_layer
# filters = filters * 2 # double the number of filters with each layer
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
if not use_dropout_on_upsampling:
dropout = 0.0
dropout_change_per_layer = 0.0
for conv in reversed(down_layers):
filters //= 2 # decreasing number of filters with each layer
dropout -= dropout_change_per_layer
# x = upsample(filters, kernel, strides=(2,2), padding="same")(x)#(2, 2)
x = tf.keras.layers.UpSampling2D(kernel)(x)
x = tf.keras.layers.concatenate([x, conv])
x = conv2d_block(
inputs=x,
filters=upconv_filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
)
# outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation=output_activation)(x)
# ## classify
if num_classes == 1:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="sigmoid"
)(x)
else:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="softmax"
)(x)
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model | 1efdfb6dd15782543adb071b081b580ea78cb986 | 13,414 |
from datetime import datetime, timedelta
from fractions import Fraction
import numpy as np
def fracday2datetime(tdata):
"""
Takes an array of dates given in %Y%m%d.%f format and returns a list of
corresponding datetime objects.
"""
dates = [datetime.strptime(str(i).split(".")[0], "%Y%m%d").date()
for i in tdata]
frac_day = [i - np.floor(i) for i in tdata]
ratios = [(Fraction(i).limit_denominator().numerator,
Fraction(i).limit_denominator().denominator) for i in frac_day]
times = [datetime.strptime(
str(timedelta(seconds=timedelta(days=i[0]/i[1]).total_seconds())),
'%H:%M:%S').time() for i in ratios]
date_times = [datetime.combine(d, t) for d, t in zip(dates, times)]
return date_times | 03ca701317a6b80fd8c14eecddc84a380f16b3aa | 13,415 |
def train(ctx, cids, msday, meday, acquired):
"""Trains a random forest model for a set of chip ids
Args:
ctx: spark context
cids (sequence): sequence of chip ids [(x,y), (x1, y1), ...]
msday (int): ordinal day, beginning of training period
meday (int): ordinal day, end of training period
acquired (str): ISO8601 date range
Returns:
A trained model or None
"""
name = 'random-forest-training'
log = logger(ctx, name)
# wire everything up
aux = timeseries.aux(ctx=ctx,
cids=cids,
acquired=acquired)\
.filter('trends[0] NOT IN (0, 9)')\
.repartition(ccdc.PRODUCT_PARTITIONS).persist()
aid = aux.select(aux.cx, aux.cy).distinct()
ccd = pyccd.read(ctx, aid).filter('sday >= {} AND eday <= {}'.format(msday, meday))
fdf = features.dataframe(aux, ccd).persist()
if fdf.count() == 0:
log.info('No features found to train model')
return None
else:
log.debug('sample feature:{}'.format(fdf.first()))
log.debug('feature row count:{}'.format(fdf.count()))
log.debug('feature columns:{}'.format(fdf.columns))
model = pipeline(fdf).fit(fdf)
# manage memory
aux.unpersist()
fdf.unpersist()
return model | aa3abec936948277db2057047ebeb686eb7331a9 | 13,416 |
def flatten(iterable):
"""
Unpacks nested iterables into the root `iterable`.
Examples:
```python
from flashback.iterating import flatten
for item in flatten(["a", ["b", ["c", "d"]], "e"]):
print(item)
#=> "a"
#=> "b"
#=> "c"
#=> "d"
#=> "e"
assert flatten([1, {2, 3}, (4,), range(5, 6)]) == (1, 2, 3, 4, 5)
```
Params:
iterable (Iterable<Any>): the iterable to flatten
Returns:
tuple<Any>: the flattened iterable
"""
items = []
for item in iterable:
if isinstance(item, (list, tuple, set, frozenset, range)):
for nested_item in flatten(item):
items.append(nested_item)
else:
items.append(item)
return tuple(items) | 8c47de3255906fb114a13ecfec4bf4a1204a0dfd | 13,417 |
def get_file_info(bucket, filename):
"""Returns information about stored file.
Arguments:
bucket: a bucket that contains the file.
filename: path to a file relative to bucket root.
Returns:
FileInfo object or None if no such file.
"""
try:
stat = cloudstorage.stat(
'/%s/%s' % (bucket, filename), retry_params=_make_retry_params())
return FileInfo(size=stat.st_size)
except cloudstorage.errors.NotFoundError:
return None | f06c6c3f29cf15992d6880e6509b8ebe11d4288b | 13,418 |
import random
def generate_tree(depth, max_depth, max_args):
"""Generate tree-like equations.
Args:
depth: current depth of the node, int.
max_depth: maximum depth of the tree, int.
max_args: maximum number of arguments per operator, int.
Returns:
A tuple (root node of the tree structure, total length of the tree).
"""
if depth < max_depth:
r = random.random()
else:
r = 1
if r > VALUE_P:
value = random.choice(VALUES)
return value, 1
else:
length = 2
num_values = random.randint(2, max_args)
values = []
for _ in range(num_values):
sub_t, sub_l = generate_tree(depth + 1, max_depth, max_args)
values.append(sub_t)
length += sub_l
op = random.choice(OPERATORS)
t = (op, values[0])
for value in values[1:]:
t = (t, value)
t = (t, END)
return t, length | df8c968444d86658d2d6f09fb836b39119998790 | 13,419 |
import os
import connexion
def create_experiment(body): # noqa: E501
"""create a experiment
instantiate/start experiment # noqa: E501
:param body: Experiment Object
:type body: dict | bytes
:rtype: ApiResponse
"""
if connexion.request.is_json:
req = Experiment.from_dict(connexion.request.get_json()) # noqa: E501
urn = req.cluster
if 'urn' not in urn:
urn = os.getenv('URN_' + req.cluster)
elif 'authority+cm' not in urn:
urn = urn + '+authority+cm'
logger.info('urn = {}'.format(urn))
if ',' not in req.profile:
req.profile = emulab.EMULAB_PROJ + ',' + req.profile
if req.username is None:
req.username = emulab.EMULAB_EXPERIMENT_USER
if req.project is None:
req.project = emulab.EMULAB_PROJ
# update the profile from repo
update_repo_cmd = '{} sudo -u {} manage_profile updatefromrepo {}'.format(
emulab.SSH_BOSS, req.username, req.profile)
emulab.send_request(update_repo_cmd)
emulab_cmd = '{} sudo -u {} start-experiment -a {} -w --name {} --project {} {}'.format(
emulab.SSH_BOSS, req.username, urn, req.name, req.project, req.profile)
emulab_stdout = emulab.send_request(emulab_cmd)
return ApiResponse(code=0, output="Please use getExperiment to check whether success or fail") | bcebee7bcc8b33857311fd8231136c1983f35061 | 13,420 |
import numpy as np
def Pow_sca(x_e, c_e, g, R0, R1, omega, epM):
"""Calculate the power scattered by an annulus
with a 'circling' electron as exciting source inside and
an electron moving on a slightly curved trajectory outside (vertical)
The trajectory of the electron derives from a straight vertical
trajectory in the ellipse frame.
Output: Resistive losses as a function of omega"""
# epM = 1-64/(omega*(omega+1j*gamma))
# omega = omega*Conv
k0 = omega/3e8
gamma_abs = 1j* np.pi**2 * ep0 * g**2/8 * k0**2
k_n = 1
###Lambda = 4*pi*eps0 in expression for source coefficients
###Calculate lambda according to formula in ELS_slab_crescent.pdf
a_n_s = np.exp(-omega/c_e*x_e)/omega*BesselI(1,omega*g/c_e)
#Calculate expansion coefficients as in ELS_ellipse_annulus.pdf
#This is for the cosine terms
b_c = (a_n_s /((epM-1)**2 * R0**(2) - (epM+1)**2 * R1**(2))\
*( (epM**2-1) * (R1**(2)-R0**(2))\
- 4*epM * R1**(2) * R0**(2) ) * R0**(-2)) - 1*a_n_s
#This is for the sin terms
b_s = (a_n_s/((epM-1)**2 * R0**(2) - (epM+1)**2 * R1**(2))\
*( -(epM**2-1) * (R1**(2)-R0**(2))\
- 4*epM * R1**(2) * R0**(2) ) * R0**(-2)) - 1*a_n_s
return omega/2 * np.imag(gamma_abs * (abs(b_c)**2 + abs(b_s)**2)) | aab79a2653428354d88ada4ded683d8eead6dd1e | 13,421 |
def read_images_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi"
)
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q"
)[0]
x_y_id_s = read_next_bytes(
fid,
num_bytes=24 * num_points2D,
format_char_sequence="ddq" * num_points2D,
)
xys = np.column_stack(
[
tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3])),
]
)
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id,
qvec=qvec,
tvec=tvec,
camera_id=camera_id,
name=image_name,
xys=xys,
point3D_ids=point3D_ids,
)
return images | 6da7916c0c74c4a9c58a91a5920d32ed8e3bbdf5 | 13,422 |
def index():
"""Render upload page."""
log_cmd('Requested upload.index', 'green')
return render_template('upload.html',
page_title='Upload',
local_css='upload.css',
) | 478f970f54a0c66443fbbfa24f44c86622e0e07f | 13,423 |
import tempfile
import json
import tarfile
import os
import requests as req
def pin_rsconnect(data, pin_name, pretty_pin_name, connect_server, api_key):
"""
Make a pin on RStudio Connect.
Parameters:
data: any object that has a to_json method (eg. pandas DataFrame)
pin_name (str): name of pin, only alphanumeric and underscores
pretty_pin_name (str): display name of pin
connect_server (str): RStudio Connect server address e.g. https://connect.example.com/
api_key (str): API key of a user on RStudio Connect
Return:
Url of content
"""
# Save data
local_dir = tempfile.TemporaryDirectory()
data.to_json(local_dir.name + "/data.txt")
# Create landing page
i = open(local_dir.name + "/index.html", "w")
lines = ["<h1>Python Pin</h1>", "\n"]
for line in lines:
i.write(line)
i.close()
# Create Manifest
manifest = {
"version": 1,
"locale": "en_US",
"platform": "3.5.1",
"metadata": {
"appmode": "static",
"primary_rmd": None,
"primary_html": "index.html",
"content_category": "pin",
"has_parameters": False,
},
"packages": None,
"files": None,
"users": None,
}
with open(local_dir.name + "/manifest.json", "w") as manifest_conn:
json.dump(manifest, manifest_conn)
# Turn into tarfile
pins_tf = tempfile.NamedTemporaryFile(delete=False)
with tarfile.open(pins_tf.name, "w:gz") as tar:
tar.add(local_dir.name, arcname=os.path.basename(local_dir.name))
auth = {"Authorization": "Key " + api_key}
content = get_content(pin_name, pretty_pin_name, connect_server, auth)
content_url = connect_server + "/__api__/v1/content/" + content["guid"]
# Upload Bundle
with open(pins_tf.name, "rb") as tf_conn:
bundle = req.post(content_url + "/bundles", headers=auth, data=tf_conn)
bundle_id = bundle.json()["id"]
# Deploy bundle
deploy = req.post(
content_url + "/deploy", headers=auth, json={"bundle_id": bundle_id}
)
return {"dash_url": content["dashboard_url"], "content_url": content["content_url"]} | 8f3fdc29988c1cbc6ceec7c201591502d791e69c | 13,424 |
import numpy as np
from tqdm import tqdm
def partial_to_full(dic1, dic2):
"""This function relates partial curves to full curves, according to the distances between them
The inputs are two dictionaries"""
C = []
D = []
F = []
# Calculate the closest full curve for all the partial curves under
# evaluation
for i in tqdm(dic1.keys()):
df = distance_cycle_to_full(i, dic1, dic2)
Distance = df['Distance'][df.index[0]]
Full_cycle = df['Cycle'][df.index[0]]
C.append(i)
D.append(Distance)
F.append(Full_cycle)
D = np.array(D)
C = np.array(C)
F = np.array(F)
return D, C, F | 31229ba4715e7241b205b81a207aae6e8290b93e | 13,425 |
import random
def _sample(probabilities, population_size):
"""Return a random population, drawn with regard to a set of probabilities"""
population = []
for _ in range(population_size):
solution = []
for probability in probabilities:
# probability of 1.0: always 1
# probability of 0.0: always 0
if random.uniform(0.0, 1.0) < probability:
solution.append(1)
else:
solution.append(0)
population.append(solution)
return population | ac781075f8437ea02b2dde3b241c21685c259e0c | 13,426 |
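A quick usage sketch for _sample above: each bit of every solution is set with the corresponding per-position probability (the probabilities here are made up).

probabilities = [0.9, 0.5, 0.1]
population = _sample(probabilities, population_size=5)
print(population)  # e.g. [[1, 0, 0], [1, 1, 0], ...]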
def nodal_scoping(node_ids, server = None):
"""Helper function to create a specific ``ansys.dpf.core.Scoping``
associated to a mesh.
Parameters
----------
node_ids : List of int
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
``None``, attempts to use the the global server.
Returns
-------
scoping : ansys.dpf.core.Scoping
"""
if not isinstance(node_ids, list):
raise dpf_errors.InvalidTypeError("list", "node_ids")
scoping = Scoping(server = server, ids = node_ids, location = locations.nodal)
return scoping | a8587a2027326e1c88088fac85a54879f28267d6 | 13,427 |
import json
from datetime import timedelta
def user_active(request):
"""Prevents auto logout by updating the session's last active time"""
# If auto logout is disabled, just return an empty body.
if not settings.AUTO_LOGOUT_SECONDS:
return HttpResponse(json.dumps({}), content_type="application/json", status=200)
last_active_at = set_session_user_last_active_at(request)
auto_logout_at = last_active_at + timedelta(seconds=settings.AUTO_LOGOUT_SECONDS)
auto_logout_warning_at = auto_logout_at - timedelta(seconds=settings.AUTO_LOGOUT_WARNING_AT_SECONDS_LEFT)
return HttpResponse(
json.dumps(
{
"auto_logout_at": auto_logout_at.isoformat(),
"auto_logout_warning_at": auto_logout_warning_at.isoformat(),
}
),
content_type="application/json",
status=200,
) | 332dd45457ab099a2775587dd357f5ccf9d663f7 | 13,428 |
def decode_labels(labels):
"""Validate labels."""
labels_decode = []
for label in labels:
if not isinstance(label, str):
if isinstance(label, int):
label = str(label)
else:
label = label.decode('utf-8').replace('"', '')
labels_decode.append(label)
return labels_decode | 36b8b10af2cd2868ab1923ccd1e620ccf815d91a | 13,429 |
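An example of decode_labels above handling mixed str, int and bytes labels:

print(decode_labels(["cat", 3, b'"dog"']))
# -> ['cat', '3', 'dog']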
def indent(text, num=2):
"""Indent a piece of text."""
lines = text.splitlines()
return '\n'.join(indent_iterable(lines, num=num)) | 04b547210463f50c0ddc7ee76547fea199e71bdc | 13,430 |
import random
def random_lever_value(lever_name):
"""Moves a given lever (lever_name) to a random position between 1 and 3.9"""
rand_val = random.randint(10, 39)/10 # Generate random value between 1 and 3.9
return move_lever([lever_name], [round(rand_val, 2)], costs = True) | c39781752b3defe164ad5d451932a71c99d95046 | 13,431 |
def back(update, context):
"""Кнопка назад."""
user = get_user_or_raise(update.effective_user.id)
update.message.reply_text(
messages.MAIN_MENU_MESSAGE, reply_markup=get_start_keyboard(user)
)
return ConversationHandler.END | 827070b4bcefad57afc8847f96a404a9272a0f7b | 13,432 |
def get_norm_residuals(vecs, word):
"""
computes normalized residuals of vectors with respect to a word
Args:
vecs (ndarray):
word (ndarray):
Returns:
ndarray : rvecs_n (the rvec_flag computation below is commented out)
CommandLine:
python -m ibeis.algo.hots.smk.smk_residuals --test-get_norm_residuals
Example:
>>> # ENABLE_DOCTEST
>>> # The case where vecs != words
>>> from ibeis.algo.hots.smk.smk_residuals import * # NOQA
>>> rng = np.random.RandomState(0)
>>> vecs = (hstypes.VEC_MAX * rng.rand(4, 128)).astype(hstypes.VEC_TYPE)
>>> word = (hstypes.VEC_MAX * rng.rand(1, 128)).astype(hstypes.VEC_TYPE)
>>> rvecs_n = get_norm_residuals(vecs, word)
>>> result = ut.numpy_str2(rvecs_n)
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> # The case where vecs == words
>>> from ibeis.algo.hots.smk.smk_residuals import * # NOQA
>>> rng = np.random.RandomState(0)
>>> vecs = (hstypes.VEC_MAX * rng.rand(4, 128)).astype(hstypes.VEC_TYPE)
>>> word = vecs[1]
>>> rvecs_n = get_norm_residuals(vecs, word)
>>> result = ut.numpy_str2(rvecs_n)
>>> print(result)
IGNORE
rvecs_agg8 = compress_normvec_uint8(arr_float)
rvecs_agg16 = compress_normvec_float16(arr_float)
ut.print_object_size(rvecs_agg16, 'rvecs_agg16: ')
ut.print_object_size(rvecs_agg8, 'rvecs_agg8: ')
ut.print_object_size(rvec_flag, 'rvec_flag: ')
%timeit np.isnan(_rvec_sums)
%timeit _rvec_sums == 0
%timeit np.equal(rvec_sums, 0)
%timeit rvec_sums == 0
%timeit np.logical_or(np.isnan(_rvec_sums), _rvec_sums == 0)
"""
# Compute residuals of assigned vectors
#rvecs_n = word.astype(dtype=FLOAT_TYPE) - vecs.astype(dtype=FLOAT_TYPE)
arr_float = np.subtract(word.astype(hstypes.FLOAT_TYPE), vecs.astype(hstypes.FLOAT_TYPE))
# Faster, but doesn't work with np.norm
#rvecs_n = np.subtract(word.view(hstypes.FLOAT_TYPE), vecs.view(hstypes.FLOAT_TYPE))
vt.normalize_rows(arr_float, out=arr_float)
# Mark null residuals
#_rvec_sums = arr_float.sum(axis=1)
#rvec_flag = np.isnan(_rvec_sums)
# Converts normvec to a smaller type like float16 or int8
rvecs_n = compress_normvec(arr_float)
# IF FLOAT16 WE NEED TO FILL NANS
# (but we should use int8, and in that case it is implicit)
# rvecs_n = np.nan_to_num(rvecs_n)
return rvecs_n | 8514f203907732c2d3175bf25f2803c93166687a | 13,433 |
def user_closed_ticket(request):
"""
Returns all closed tickets opened by user
:return: JsonResponse
"""
columns = _no_priority
if settings.SIMPLE_USER_SHOW_PRIORITY:
columns = _ticket_columns
ticket_list = Ticket.objects.filter(created_by=request.user,
is_closed=True)
dtd = TicketDTD( request, ticket_list, columns )
return JsonResponse(dtd.get_dict()) | 61c2be90937c4d8892dc8756428e3b191adc6d55 | 13,434 |
def review_lock_status(review_id):
"""
return status of review included trials (locked = T or F)
@param review_id: pmid of review
@return: boolean
"""
conn = dblib.create_con(VERBOSE=True)
cur = conn.cursor()
cur.execute(
"SELECT included_complete FROM systematic_reviews WHERE review_id = %s;",
(review_id,))
locked = cur.fetchone()[0]
conn.close()
return locked | 452731857800ac45c5cbbb7e158223e3055e807d | 13,435 |
def get_placekey_from_address(street_address:str, city:str, state:str, postal_code:str, iso_country_code:str='US',
placekey_api_key: str = None) -> str:
"""
Look up the full Placekey for a given address string.
:param street_address: Street address with suite, floor, or apartment.
:param city: The city.
:param state: Two character state identifier.
:param postal_code: Postal code identifier; typically five numbers.
:param iso_country_code: Two character country identifier. Defaults to "US".
:param placekey_api_key: Placekey API key for making requests.
:return: Placekey string.
"""
# check a couple of things for the parameter inputs
assert len(state) == 2, f'state must be two character identifier, not "{state}".'
assert len(iso_country_code) == 2, 'iso_country_code must be two character identifier, not ' \
f'"{iso_country_code}".'
body = {
"query": {
"street_address": street_address,
"city": city,
"region": state,
"postal_code": postal_code,
"iso_country_code": iso_country_code
}
}
pk = _get_placekey(body, placekey_api_key)
return pk | 0f3f911bb66a30138b8b293455d348f618e11486 | 13,436 |
import os
import time
async def upload_image(
request: Request,
file: UploadFile = File(...)
):
"""
upload image(jpg/jpeg/png/gif) to server and store locally
:return: upload result
"""
save_file_path = os.path.join(os.getcwd().split("app")[0], r"app/static/images")
# pic_uuid = str(uuid.uuid4())
file_name = file.filename
endfix = file_name.rpartition(".")[-1]
try:
content = await file.read()
# MD5-check the file content; duplicates do not need to be written again
md5_vercation = md5(content)
md5_hash = md5_vercation.hexdigest()
file_url = f'/{env_api}/api/file/image/{md5_hash}'
file_path = os.path.join(save_file_path, f"{md5_hash}.{endfix}")
with request.pony_session:
file_obj = models.Upload.get(md5_hash=md5_hash)
if file_obj:
file_obj.file_path = file_path
file_obj.file_url = file_url
if not os.path.exists(file_path): # if the file is missing on disk, write it again
with open(file_path, "wb") as f:
f.write(content)
orm.commit()
# the file has already been written before
return {"code": 20000, "msg": "Success, file info updated", "time": time.strftime('%Y-%m-%d %H:%M:%S'), "url": file_url}
# file attributes
file_dict = {
"file_name": file_name,
"file_path": file_path,
"md5_hash": md5_hash,
"file_url": file_url,
"note": "image"
}
models.Upload(**file_dict)
orm.commit()
with open(file_path, "wb") as f:
print("写入路径", file_path)
f.write(content)
return {"code": 20000, "msg": "Success", "time": time.strftime('%Y-%m-%d %H:%M:%S'), "url": file_url}
except Exception as e:
return {"code": 50000, "msg": str(e), "time": time.strftime('%Y-%m-%d %H:%M:%S'), "url": ""} | 4df0841998ffb12cce5ad6fdb4df23f594a57d16 | 13,437 |
from pathlib import Path
def _path_to_str(var):
"""Make sure var is a string or Path, return string representation."""
if not isinstance(var, (Path, str)):
raise ValueError("All path parameters must be either strings or "
"pathlib.Path objects. Found type %s." % type(var))
else:
return str(var) | c5ae3ed06be31de3220b5400966866ccda29b9fc | 13,438 |
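A brief example of _path_to_str above (output shown for a POSIX system):

from pathlib import Path

print(_path_to_str(Path("/tmp") / "raw.fif"))  # '/tmp/raw.fif'
print(_path_to_str("already-a-string"))        # 'already-a-string'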
def netconf_edit_config(task: Task, config: str, target: str = "running") -> Result:
"""
Edit configuration of device using Netconf
Arguments:
config: Configuration snippet to apply
target: Target configuration store
Examples:
Simple example::
> nr.run(task=netconf_edit_config, config=desired_config)
"""
manager = task.host.get_connection("netconf", task.nornir.config)
manager.edit_config(config, target=target)
return Result(host=task.host) | 9862199c65ecbdc9eb037a181e5783eb911f76a1 | 13,439 |
def _cve_id_field_name():
""" Key name for a solr field that contains cve_id
"""
return "cve_id" | 68ca6f2585804e63198a20d3f174836a0cbb0841 | 13,440 |
def mapRuntime(dataFrame1, dataFrame2):
"""
Add the scraped runtimes of the titles in the viewing activity dataframe
Parameters:
dataFrame1: string
The name of the dataFrame to which the user wants to add the runtime
dataFrame2: string
The name of the dataFrame containing the runtimes.
Returns:
a dataFrame
"""
dataFrame1['Runtime'] = dataFrame1.Title.map(
dataFrame2.set_index('Title')['runtime'].to_dict())
return dataFrame1 | 61d4af72c51e61c6f0077b960e4002dd7d272ad8 | 13,441 |
from math import sqrt
def get_circ_center_2pts_r(p1, p2, r):
"""
Find the centers of the two circles that share two points p1/p2 and a radius.
From algorithm at http://mathforum.org/library/drmath/view/53027.html. Adapted from version at
https://rosettacode.org/wiki/Circles_of_given_radius_through_two_points#Python.
:param p1: First point , tuple (x, y)
:param p2: Second point, tuple (x, y)
:param r: Radius of circle
:return: a list of 2 points that are centers of circles of radius r sharing p1/p2
"""
if r == 0.0:
raise ValueError('No solution due to no radius')
(x1, y1), (x2, y2) = tuple(p1), tuple(p2)
if p1 == p2:
raise ValueError('Infinite number of solutions')
# Distance in x and y between points
dx = x2 - x1
dy = y1 - y2
# Dist between points
q = sqrt(dx ** 2 + dy ** 2)
if q > (2.0 * r):
raise ValueError('Too much distance between points to fit within radius')
# Halfway point
x3 = (x1 + x2) / 2.0
y3 = (y1 + y2) / 2.0
# Distance along the mirror line
d = sqrt(r ** 2 - ((q / 2.0) ** 2))
# First circle center
# c1 = (x3 + ((d * dy) / q), y3 + ((d * dx) / q))
# Second circle center
# c2 = (x3 - ((d * dy) / q), y3 - ((d * dx) / q))
c1x = x3 + sqrt(r ** 2 - (q / 2.0) ** 2) * (y1 - y2) / q
c1y = y3 + sqrt(r ** 2 - (q / 2.0) ** 2) * (x2 - x1) / q
c2x = x3 - sqrt(r ** 2 - (q / 2.0) ** 2) * (y1 - y2) / q
c2y = y3 - sqrt(r ** 2 - (q / 2.0) ** 2) * (x2 - x1) / q
return ((c1x, c1y), (c2x, c2y)) | 5ad9abe858721ad94c5d16cc8ed617dabe9f3336 | 13,442 |
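A worked example for get_circ_center_2pts_r above: the two unit circles through (0, 0) and (2, 0) coincide, so the same center is returned twice.

centers = get_circ_center_2pts_r((0, 0), (2, 0), 1.0)
print(centers)  # ((1.0, 0.0), (1.0, 0.0))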
def crext_MaxFragmentLength(length_exponent):
"""Create a MaxFragmentLength extension.
Allowed lengths are 2^9, 2^10, 2^11, 2^12. (TLS default is 2^14)
`length_exponent` should be 9, 10, 11, or 12, otherwise the extension will
contain an illegal value.
"""
maxlen = (length_exponent-8).to_bytes(1,"big")
return ExtensionType.max_fragment_length.value + lenprefix(maxlen) | 0078d372440dbe4914675efd004b4fb60f73a6d8 | 13,443 |
import torch
def perform_intervention(intervention, model, effect_types=('indirect', 'direct')):
"""Perform intervention and return results for specified effects"""
x = intervention.base_strings_tok[0] # E.g. The doctor asked the nurse a question. She
x_alt = intervention.base_strings_tok[1] # E.g. The doctor asked the nurse a question. He
with torch.no_grad():
candidate1_base_prob, candidate2_base_prob = model.get_probabilities_for_examples_multitoken(
x,
intervention.candidates_tok)
candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
x_alt,
intervention.candidates_tok)
candidate1 = ' '.join(intervention.candidates[0]).replace('Ġ', '')
candidate2 = ' '.join(intervention.candidates[1]).replace('Ġ', '')
odds_base = candidate2_base_prob / candidate1_base_prob
odds_alt = candidate2_alt_prob / candidate1_alt_prob
total_effect = (odds_alt - odds_base) / odds_base
results = {
'base_string1': intervention.base_strings[0],
'base_string2': intervention.base_strings[1],
'candidate1': candidate1,
'candidate2': candidate2,
'candidate1_base_prob': candidate1_base_prob,
'candidate2_base_prob': candidate2_base_prob,
'odds_base': odds_base,
'candidate1_alt_prob': candidate1_alt_prob,
'candidate2_alt_prob': candidate2_alt_prob,
'odds_alt': odds_alt,
'total_effect': total_effect,
}
for effect_type in effect_types:
candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
candidate1_probs_model, candidate2_probs_model = model.attention_intervention_experiment(
intervention, effect_type)
odds_intervention_head = candidate2_probs_head / candidate1_probs_head
odds_intervention_layer = candidate2_probs_layer / candidate1_probs_layer
odds_intervention_model = candidate2_probs_model / candidate1_probs_model
effect_head = (odds_intervention_head - odds_base) / odds_base
effect_layer = (odds_intervention_layer - odds_base) / odds_base
effect_model = (odds_intervention_model - odds_base) / odds_base
results[effect_type + "_odds_head"] = odds_intervention_head.tolist()
results[effect_type + "_effect_head"] = effect_head.tolist()
results[effect_type + "_effect_layer"] = effect_layer.tolist()
results[effect_type + "_effect_model"] = effect_model
return results | 3fae717923adda6d4b08c424c24600d578961a2a | 13,444 |
def nice_size(
self: complex,
unit: str = 'bytes',
long: bool = False,
lower: bool = False,
precision: int = 2,
sep: str = '-',
omissions: list = 'mono deca hecto'.split(),
):
"""
This should behave well on int subclasses
"""
mag = magnitude(self, omissions)
precision = sredro[mag] if self < 5 else precision
unit = set_case(set_length(mag, unit, long, sep), lower)
val = round(self * 10 ** -(sredro[mag]), precision)
return lasso(val, unit) | 1361f17e98ce4d5c6f9c094b8a4f1a9e7cf3035b | 13,445 |
from .core.observable.fromcallback import _from_callback
from typing import Callable
from typing import Optional
import typing
def from_callback(func: Callable,
mapper: Optional[typing.Mapper] = None
) -> Callable[[], Observable]:
"""Converts a callback function to an observable sequence.
Args:
func: Function with a callback as the last argument to
convert to an Observable sequence.
mapper: [Optional] A mapper which takes the arguments
from the callback to produce a single item to yield on
next.
Returns:
A function, when executed with the required arguments minus
the callback, produces an Observable sequence with a single
value of the arguments to the callback as a list.
"""
return _from_callback(func, mapper) | b93900f480d5dd851d8e45c00627590ad89fd24c | 13,446 |
import re
def EVLAUVFITS(inUV, filename, outDisk, err, compress=False, \
exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL"], \
include=[], headHi=False, logfile=""):
"""
Write UV data as FITS file
Write a UV data set as a FITAB format file
History written to header
* inUV = UV data to copy
* filename = name of FITS file, any whitespace characters replaced with underscore
* outDisk = FITS directory number
* err = Python Obit Error/message stack
* exclude = List of table types NOT to copy
NB: "AIPS HI" isn't really a table and gets copied anyway
* include = List of table types to copy (FQ, AN always done )
Exclude has presidence over include
* headHi = if True move history to header, else leave in History table
returns FITS UV data object
"""
################################################################
mess = "Write Data to FITS UV data "+filename+" on disk "+str(outDisk)
printMess(mess, logfile)
# Checks
if not UV.PIsA(inUV):
raise TypeError("inUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Deblank filename
fn = re.sub(r'\s', '_', filename)
# Set output
outUV = UV.newPFUV("FITS UV DATA", fn, outDisk, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating FITS data")
#Compressed?
if compress:
inInfo = UV.PGetList(outUV) #
dim = [1,1,1,1,1]
InfoList.PAlwaysPutBoolean (inInfo, "Compress", dim, [True])
# Copy
UV.PCopy (inUV, outUV, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying UV data to FITS")
# History
inHistory = History.History("inhistory", outUV.List, err)
outHistory = History.History("outhistory", outUV.List, err)
# Add history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit uvtab",err)
outHistory.WriteRec(-1,"uvtab / FITS file "+fn+" disk "+str(outDisk),err)
outHistory.Close(err)
# History in header?
if headHi:
History.PCopy2Header (inHistory, outHistory, err)
OErr.printErrMsg(err, "Error with history")
# zap table
outHistory.Zap(err)
# Copy Tables
UV.PCopyTables (inUV, outUV, exclude, include, err)
return outUV | 98de4f1422be2281eca539b9c372e8d0b9980aeb | 13,447 |
def form_cleaner(querydict):
"""
Hacky way to transform form data into readable data by the model constructor
:param querydict: QueryDict
:return: dict
"""
r = dict(querydict.copy())
# Delete the CRSF Token
del r['csrfmiddlewaretoken']
for key in list(r):
# Take first element of array
r[key] = r[key][0]
# Delete empty fields
if r[key] == '' or r[key] is None:
del r[key]
return r | 83d61f028748132803555da85f0afe0215be2edd | 13,448 |
def has_1080p(manifest):
"""Return True if any of the video tracks in manifest have a 1080p profile
available, else False"""
return any(video['width'] >= 1920
for video in manifest['videoTracks'][0]['downloadables']) | f187ff7fd8f304c0cfe600c4bed8e809c4c5e105 | 13,449 |
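A minimal sketch for has_1080p above using a hypothetical manifest fragment:

manifest = {"videoTracks": [{"downloadables": [{"width": 1280}, {"width": 1920}]}]}
print(has_1080p(manifest))  # True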
from texttable import Texttable
def visualize_table(filename: str, table: str) -> bool:
"""
Formats the contents of a db table using the texttable package
:param filename: .db file name (String)
:param table: Name of the table to plot (String)
:return: Bool
"""
conn, cursor = get_connection(filename)
table_elements = get_table(filename, table)
if not len(table_elements) > 0:
print("This table is empty")
return False
text_table = Texttable()
allign = ["l" for i in range(len(table_elements[0]))]
vallign = ["m" for i in range(len(table_elements[0]))]
title = eval(query(filename, "tables", "name", table)[0][1])
text_table.set_cols_align(allign)
text_table.set_cols_valign(vallign)
text_table.header(title)
for row in table_elements:
text_table.add_row(row)
print(text_table.draw())
return True | d7ab8125353ac0550a704ba208a8095f82125294 | 13,450 |
def extractIsekaiMahou(item):
"""
# Isekai Mahou Translations!
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Isekai Mahou Chapter' in item['title'] and 'Release' in item['title']:
return buildReleaseMessageWithType(item, 'Isekai Mahou wa Okureteru!', vol, chp, frag=frag, postfix=postfix)
return False | 8537e5d9374c38aa0218d380013e98c4bfe6eabb | 13,451 |
def delete_version_from_file(fname, par, ctype=gu.PIXEL_MASK, vers=None, cmt=None, verb=False) :
"""Delete specified version from calibration constants.
Parameters
- fname : full path to the hdf5 file
- par : psana.Event | psana.Env | float - tsec event time
- ctype : gu.CTYPE - enumerated calibration type, e.g.: gu.PIXEL_MASK
- vers : int - calibration version
- cmt : str - comment
- verb : bool - verbousity
See :py:class:`DCMethods`
"""
metname = sys._getframe().f_code.co_name
str_ctype = gu.dic_calib_type_to_name[ctype]
if verb : print ' %s.delete_version_from_file: ctype: %s vers: %s'%\
(metname, str_ctype, vers)
if not is_good_fname(fname, verb) : return None
cs = DCStore(fname)
cs.load()
ct = cs.ctypeobj(str_ctype)
if ct is None : return None
#ct.print_obj()
tsec = dcu.par_to_tsec(par)
cr = ct.range_for_tsec(tsec)
if cr is None : return None
v = vers if vers is not None else cr.vnum_last()
vdel = cr.mark_version(vnum=vers, cmt=cmt)
if verb : log.setPrintBits(02) # 0377
cs.save()
if verb :
print 50*'_','\nDCStore.print_obj() after delete version %s' % str(vdel)
cs.print_obj()
return vdel | 2f5e6d180457f140c8195e358ffa6afbae8a227d | 13,452 |
import os
def get_filename(filePath):
"""get filename without file extension from file path
"""
absFilePath = os.path.abspath(filePath)
return os.path.basename(os.path.splitext(absFilePath)[0]) | e9ccddf29f38f88ccd65764a2914689611b142e8 | 13,453 |
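An example call to get_filename above (the path is illustrative):

print(get_filename("/data/run_01/results.csv"))  # 'results'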
from typing import List
from typing import Tuple
import io
import mido
def create_midi_file(notes: List[Tuple[int, int]]) -> io.BytesIO:
"""Create a MIDI file from the given list of notes.
Notes are played with piano instrument.
"""
byte_stream = io.BytesIO()
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
for note, t in notes:
track.append(mido.Message('note_on', note=note, velocity=64))
track.append(mido.Message('note_off', note=note, time=t))
mid.save(file=byte_stream)
return io.BytesIO(byte_stream.getvalue()) | 1f9443df11f08a76c9d5c472d025fe92f3d459af | 13,454 |
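A short usage sketch for create_midi_file above: write a three-note phrase (MIDI note numbers with tick durations) to a hypothetical file.

notes = [(60, 480), (64, 480), (67, 480)]  # C4, E4, G4
with open("phrase.mid", "wb") as fh:
    fh.write(create_midi_file(notes).read())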
async def get_programs(request: Request) -> Response:
"""
description: Get a list of all programs
responses:
200:
description: A list of programs.
"""
ow: "OpenWater" = request.app.ow
return ToDictJSONResponse([p.to_dict() for p in ow.programs.store.all]) | d4a16ec19ba4e095c0479a43f2ef191e9dae84f5 | 13,455 |
import pandas as pd
def create_dataframe(dictionary_to_convert, cols):
"""
Build a pandas DataFrame from the passed dictionary, using the desired
column names, and return it.
"""
dataframe_converted = pd.DataFrame.from_dict(dictionary_to_convert, orient='index', columns = cols)
dataframe_converted = dataframe_converted.reset_index()
dataframe_converted = dataframe_converted.drop(columns=['index'])
return dataframe_converted | 4f2ad388cd9a12a6aee55e974320c8b7ac7f95a7 | 13,456 |
def br_candidates(data, br_remaining, commit):
"""Find candidates not yet included to be added to br(r,*)
Given the list of remaining, that is not yet taken into account
bug fixes, split this list into part that has been created (fixed)
before creation time of given bug report, and those that were
created (fixed) later.
Creation times of bugfix commits, and the date when given bug
report was creates is taken from the augmented combined bugs+fixes
dataset. The list of remaining fixes (as shortened SHA-1
identifiers, which are keys in the dataset to bug report + bug fix
info) needs to be sorted in ascending chronological order of
bugfix commit creation date. Returned lists are also sorted; the
original list is split in two.
Parameters
----------
data : dict | collections.OrderedDict
Combined data about bug reports and bugfix commits, read from
the JSON file.
br_remaining : list
List of remaining keys to data (of shortened SHA-1
identifiers of bugfix commits), sorted in the bugfix commit
creation time order. This means that the `commit` timestamp
divides this list into two parts: first that has commit
creation date not later than creation date of given bugfix,
and those that are later.
/-- t(r)
|
[c_0, c_1,...,c_i, | c_{i+1},...,c_{N-1}]
where t(c_j) < t_(c_{j+1}) and t(c_i) < t(r) <= t(c_{i+1}).
commit : str
Identifier of the bug report, all bugfix commits added to the
returned list have commit date not later than bug report
creation date.
TODO?: maybe change the name of this parameter.
Returns
-------
(br_new, br_remaining) : (list, list)
First list in returned pair is the list of bugfix commits from
`br_remaining` with creation time earlier than bug report
creation time of `commit`. Because `br_remaining` is assumed
to be sorted this would be some number of elements from the
start of it, and it would also be sorted. Possibly empty.
Second list in returned pair is the list of remaining bugfix
commits, with creation time later than cration time of given
bug report. Possibly empty.
These two lists are (br_remaining[:i], br_remaining[i:]) for
some value of i.
"""
this_bug_ts = int(data[commit]['bug_report']['timestamp'])
this_fix_ts = int(data[commit]['commit']['metadata']['timestamp'])
commit_list = []
# DEBUG
#print('commit =%s (bug_ts=%d) / bug_id=%d' %
# (commit, this_bug_ts, int(data[commit]['bug_report']['bug_id'])))
# corner cases
if not br_remaining:
# DEBUG
#print('br_candidates: empty list')
# no candidates
return ([], [])
elif this_bug_ts <= int(data[br_remaining[0]]['commit']['metadata']['timestamp']):
# DEBUG
#print('br_candidates: early return %d < %d' %
# (this_bug_ts, int(data[br_remaining[0]]['commit']['metadata']['timestamp'])))
# all commits are later (newer) than given bug
return ([], br_remaining)
elif int(data[br_remaining[-1]]['commit']['metadata']['timestamp']) < this_bug_ts:
# even last commit is earlier (older) than given bug
# NOTE: should never happen in this code
return (br_remaining, [])
for (i,v) in enumerate(br_remaining):
curr_bug_ts = int(data[v]['bug_report']['timestamp'])
curr_fix_ts = int(data[v]['commit']['metadata']['timestamp'])
if not curr_fix_ts < this_bug_ts:
return (br_remaining[:i], br_remaining[i:]) | 4885a6b894fbed140db2f6da6f84daba481bf2ef | 13,457 |
import pandas
def aggregate_dataframe(mails_per_sender, datetimes_per_sender):
"""Engineer features and aggregate them in a dataframes.
:param dict mails_per_sender: A dictionary with email counts for each sender
:param dict datetimes_per_sender: A dictionary with datetime objects for
each sender
:raises InputError: if at least one of the arguments is an empty dictionary
:returns: A dataframe with aggregated features
:rtype: pandas.DataFrame
"""
try:
if not mails_per_sender or not datetimes_per_sender:
raise exceptions.InputError('At least one of the arguments is an '
'empty dictionary!')
except exceptions.InputError:
raise
average_timestamps = average_timestamps_in_seconds(
datetimes_per_sender)
average_weekdays = weekday_average(datetimes_per_sender)
aggregation = {'Mail Count': mails_per_sender,
'Average Timestamp': average_timestamps,
'Average Weekday': average_weekdays}
return pandas.DataFrame(aggregation) | a584d72fdb2df9148b5ff6a6fe907c8f09b26234 | 13,458 |
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, aliases=None, **kwargs):
"""Create the test databases.
This function is a copy of the Django setup_databases with one addition.
A Tenant object is created and saved when setting up the database.
"""
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
test_db_name = connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
)
try:
tenant, created = Tenant.objects.get_or_create(schema_name=Tenant._TEMPLATE_SCHEMA)
if created:
tenant.save()
tenant.create_schema()
tenant, created = Tenant.objects.get_or_create(schema_name=KokuTestRunner.schema)
if created:
tenant.save()
tenant.create_schema()
customer, __ = Customer.objects.get_or_create(
account_id=KokuTestRunner.account, schema_name=KokuTestRunner.schema
)
with tenant_context(tenant):
for tag_key in OCP_ENABLED_TAGS:
OCPEnabledTagKeys.objects.get_or_create(key=tag_key)
data_loader = NiseDataLoader(KokuTestRunner.schema, customer)
# Obtain the day_list from yaml
read_yaml = UploadAwsTree(None, None, None, None)
tree_yaml = read_yaml.import_yaml(yaml_file_path="scripts/aws_org_tree.yml")
day_list = tree_yaml["account_structure"]["days"]
# Load data
# TODO: COST-444: This NiseDataLoader to be removed and replaced with the commented baker_data_loaders below.
data_loader = NiseDataLoader(KokuTestRunner.schema, customer)
data_loader.load_openshift_data(customer, "ocp_aws_static_data.yml", "OCP-on-AWS")
data_loader.load_aws_data(customer, "aws_static_data.yml", day_list=day_list)
data_loader.load_openshift_data(customer, "ocp_azure_static_data.yml", "OCP-on-Azure")
data_loader.load_azure_data(customer, "azure_static_data.yml")
bakery_data_loader = ModelBakeryDataLoader(KokuTestRunner.schema, customer)
ocp_on_aws_cluster_id = "OCP-on-AWS"
ocp_on_azure_cluster_id = "OCP-on-Azure"
ocp_on_gcp_cluster_id = "OCP-on-GCP"
ocp_on_prem_cluster_id = "OCP-on-Prem"
# TODO: COST-444: uncomment these when the above data_loader is removed
# ocp_on_aws_ocp_provider, ocp_on_aws_report_periods = bakery_data_loader.load_openshift_data(
# ocp_on_aws_cluster_id, on_cloud=True
# )
# ocp_on_azure_ocp_provider, ocp_on_azure_report_periods = bakery_data_loader.load_openshift_data(
# ocp_on_azure_cluster_id, on_cloud=True
# )
# ocp_on_gcp_ocp_provider, ocp_on_gcp_report_periods = bakery_data_loader.load_openshift_data(
# ocp_on_gcp_cluster_id, on_cloud=True
# )
# _, __ = bakery_data_loader.load_openshift_data(ocp_on_prem_cluster_id, on_cloud=False)
# _, aws_bills = bakery_data_loader.load_aws_data(
# linked_openshift_provider=ocp_on_aws_ocp_provider, day_list=day_list
# )
# _, azure_bills = bakery_data_loader.load_azure_data(
# linked_openshift_provider=ocp_on_azure_ocp_provider
# )
_, gcp_bills = bakery_data_loader.load_gcp_data()
# bakery_data_loader.load_openshift_on_cloud_data(
# Provider.PROVIDER_AWS_LOCAL, ocp_on_aws_cluster_id, aws_bills, ocp_on_aws_report_periods
# )
# bakery_data_loader.load_openshift_on_cloud_data(
# Provider.PROVIDER_AZURE_LOCAL,
# ocp_on_azure_cluster_id,
# azure_bills,
# ocp_on_azure_report_periods,
# )
# bakery_data_loader.load_openshift_on_cloud_data(
# Provider.PROVIDER_GCP_LOCAL, ocp_on_gcp_cluster_id, gcp_bills, ocp_on_gcp_report_periods
# )
for account in [("10002", "acct10002"), ("12345", "acct12345")]:
tenant = Tenant.objects.get_or_create(schema_name=account[1])[0]
tenant.save()
tenant.create_schema()
Customer.objects.get_or_create(account_id=account[0], schema_name=account[1])
except Exception as err:
LOG.error(err)
raise err
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb)
else:
connection.creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names | fc47c571c40f56f6dabc96bd04e270e874b99d6b | 13,459 |
import sys
def about_view(request):
"""
This view gives information about the version and software we have loaded.
"""
evaluation = get_session_evaluation(request.session)
definitions = evaluation.definitions
system_info = mathics_system_info(definitions)
return render(
request,
"about.html",
{
"django_version": django_version,
"three_js_version": get_threejs_version(),
"mathics_threejs_backend_version": get_mathics_threejs_backend_version(),
"MathJax_version": get_MathJax_version(),
"mathics_version": mathics_version_info["mathics"],
"mathics_django_version": __version__,
"mpmath_version": mathics_version_info["mpmath"],
"numpy_version": mathics_version_info["numpy"],
"python_version": mathics_version_info["python"],
"sympy_version": mathics_version_info["sympy"],
"SystemID": system_info["$SystemID"],
"SystemTimeZone": system_info["$SystemTimeZone"],
"UserName": system_info["$UserName"],
"BaseDirectory": system_info["$BaseDirectory"],
"HomeDirectory": system_info["$HomeDirectory"],
"InstallationDirectory": system_info["$InstallationDirectory"],
"RootDirectory": system_info["$RootDirectory"],
"TemporaryDirectory": system_info["$TemporaryDirectory"],
"DB_PATH": MATHICS_DJANGO_DB_PATH,
"DOC_DATA_PATH": DOC_USER_HTML_DATA_PATH,
"HTTP_USER_AGENT": request.META.get("HTTP_USER_AGENT", ""),
"REMOTE_USER": request.META.get("REMOTE_USER", ""),
"REMOTE_ADDR": request.META.get("REMOTE_ADDR", ""),
"REMOTE_HOST": request.META.get("REMOTE_HOST", ""),
"MachinePrecision": system_info["MachinePrecision"],
"MemoryAvailable": system_info["MemoryAvailable[]"],
"SystemMemory": system_info["$SystemMemory"],
"Machine": system_info["$Machine"],
"MachineName": system_info["$MachineName"],
"ProcessID": system_info["$ProcessID"],
"ProcessorType": system_info["$ProcessorType"],
"PythonVersion": sys.version,
"user_settings": get_user_settings(evaluation),
},
) | c2d3a8d1c640a3ad6bd7b955e745c288aea87049 | 13,460 |
import os
def is_trace_directory(path: str) -> bool:
"""
Check recursively if a path is a trace directory.
:param path: the path to check
:return: `True` if it is a trace directory, `False` otherwise
"""
path = os.path.expanduser(path)
if not os.path.isdir(path):
return False
return impl.is_trace_directory(path) | d0f0a7ef323072196e1b007d88c94de0576d4137 | 13,461 |
import numpy as np
def traj2points(traj, npoints, OS):
"""
Transform spoke trajectory to point trajectory
Args:
traj: Trajectory with shape [nspokes, 3]
npoints: Number of readout points along spokes
OS: Oversampling
Returns:
array: Trajectory with shape [nspokes, npoints, 3]
"""
[nspokes, ndim] = np.shape(traj)
r = (np.arange(0, npoints))/OS
Gx, Gy, Gz = np.meshgrid(r, np.arange(nspokes), np.arange(ndim))
traj_p = Gx*np.transpose(np.tile(traj, [npoints, 1, 1]), [1, 0, 2])
return traj_p | f411b91e86943f7ae03f52cf3d6b1005299902ba | 13,462 |
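A minimal usage sketch for traj2points, assuming only numpy; the 2-spoke trajectory, npoints and OS values below are made up for illustration.

import numpy as np

traj = np.array([[1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0]])          # [nspokes, 3]
traj_p = traj2points(traj, npoints=4, OS=2)
print(traj_p.shape)      # (2, 4, 3) -> [nspokes, npoints, 3]
print(traj_p[0, :, 0])   # readout samples along the first spoke's x-axis: [0.  0.5 1.  1.5]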
import re
import os
def parse_headings(raw_contents, file_):
"""Parse contents looking for headings. Return a tuple with number
of TOC elements, the TOC fragment and the number of warnings."""
# Remove code blocks
parsable_contents = re.sub(r"```[\s\S]+?```", "", raw_contents)
# Parse H1,H2,H3
headings = re.findall(r"^(#|##|###)\s+(.*)", parsable_contents,
re.MULTILINE)
toc = "Table of contents:" + os.linesep
tocsize = 0
warnings = 0
count_h1 = 0
count_h2 = 0
for heading in headings:
level = heading[0]
level = (1 if level == "#" else
2 if level == "##" else
3 if level == "###" else None)
assert level is not None
title = heading[1].strip()
if level == 1:
count_h1 += 1
if count_h1 > 1:
warnings += 1
print("WARNING: found more than one H1 in "+file_)
continue
if level == 2:
count_h2 += 1
hash_ = headinghash(title)
indent = ""
if level == 3:
if count_h2:
# If there was no H2 yet then H3 shouldn't have indent.
indent = " " * 2
toc += indent + "* [%s](#%s)" % (title, hash_) + os.linesep
tocsize += 1
if tocsize <= 1:
# If there is only one H2/H3 heading do not create TOC.
toc = ""
tocsize = 0
return tocsize, toc, warnings | 9569ba74878852ad95598ef42bb66d3df89c09c8 | 13,463 |
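A hedged usage sketch for parse_headings. The project's headinghash() helper is not shown above, so a stand-in that mimics GitHub-style anchors is defined here purely for illustration.

import re

def headinghash(title):
    # Stand-in for the project's helper (assumption: GitHub-style anchor slugs).
    return re.sub(r"[^\w\- ]", "", title).strip().lower().replace(" ", "-")

contents = "\n".join([
    "# My Project",
    "## Install",
    "## Usage",
    "### Advanced usage",
])
tocsize, toc, warnings = parse_headings(contents, "README.md")
print(tocsize, warnings)   # 3 0 (the single H1 is skipped, H2/H3 go into the TOC)
print(toc)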
import os
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask | ef5ee2405241f880b44b8d6abb56bee4a1e04512 | 13,464 |
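Typical interactive use; the exact mask depends on the shell that launched Python, so the values in the comments are only examples.

mask = current_umask()
print(oct(mask))             # e.g. 0o022 on many Linux systems
print(oct(0o666 & ~mask))    # permission bits a newly created file would get, e.g. 0o644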
import tensorflow as tf  # TF1-style variable API (tf.get_variable / tf.GraphKeys); use tensorflow.compat.v1 under TF2
def model_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
constraint=None,
trainable=True,
collections=None,
**kwargs):
"""
Get or create a model variable.
When the variable is created, it will be added to both `GLOBAL_VARIABLES`
and `MODEL_VARIABLES` collection.
Args:
name: Name of the variable.
shape: Shape of the variable.
dtype: Data type of the variable.
initializer: Initializer of the variable.
regularizer: Regularizer of the variable.
constraint: Constraint of the variable.
trainable (bool): Whether or not the variable is trainable?
collections: In addition to `GLOBAL_VARIABLES` and `MODEL_VARIABLES`,
also add the variable to these collections.
\\**kwargs: Other named arguments passed to :func:`tf.get_variable`.
Returns:
tf.Variable: The variable.
"""
collections = list(set(
list(collections or ()) +
[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.MODEL_VARIABLES]
))
return tf.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
constraint=constraint,
trainable=trainable,
collections=collections,
**kwargs
) | 7c717234fca10163708abf19057e68124b8fe3e8 | 13,465 |
def default_k_pattern(n_pattern):
""" the default number of pattern divisions for crossvalidation
minimum number of patterns is 3*k_pattern. Thus for n_pattern <=9 this
returns 2. From there it grows gradually until 5 groups are made for 40
patterns. From this point onwards the number of groups is kept at 5.
bootstrapped crossvalidation also uses this function to set k, but scales
n_rdm to the expected proportion of samples retained when bootstrapping
(1-np.exp(-1))
"""
if n_pattern < 12:
k_pattern = 2
elif n_pattern < 24:
k_pattern = 3
elif n_pattern < 40:
k_pattern = 4
else:
k_pattern = 5
return k_pattern | 60d083ffed24987882fa8074d99e37d06748eaf3 | 13,466 |
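A quick check of the breakpoints implemented above (pattern counts chosen arbitrarily for illustration):

for n_pattern in (8, 12, 30, 100):
    print(n_pattern, default_k_pattern(n_pattern))
# 8 2
# 12 3
# 30 4
# 100 5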
import numpy as np
import xarray as xr
# NOTE: `gv` used below is a project-level config module (provides gv.ipath) and is assumed to be importable.
def resize_basinData():
"""
read in global data and make the new bt with same length
this step can be elimated if we are using ibtracks in the future CHAZ development
"""
basinName = ['atl','wnp','enp','ni','sh']
nd = 0
for iib in range(0,len(basinName),1):
ib = basinName[iib]
f =gv.ipath + 'bt_'+ib+'.nc'
#bt1 = nc.Dataset(f)
bt1 = xr.open_dataset(f)
if iib == 0:
maxCol = bt1['PIslp'].shape[0]
else:
maxCol = np.nanmax([maxCol,bt1['PIslp'].shape[0]])
## in bt1, the time is datenumber start from 1800,01,01,0,0. So if datenumber is 0 means there is no data
nd += bt1['PIslp'].shape[1]
bt = {}
    for iib in range(0,len(basinName),1):
        ib = basinName[iib]
        # reopen the per-basin file; the original reused the stale `f` left over from the loop above
        f = gv.ipath + 'bt_'+ib+'.nc'
        bt1 = xr.open_dataset(f)
for iv in bt1.variables.keys():
if iib == 0:
if np.size(bt1.variables[iv].shape) >1:
                    bt[iv] = np.zeros([maxCol,bt1.variables[iv].shape[1]])*np.nan
bt[iv][:bt1.variables[iv].shape[0],:] = bt1.variables[iv].values
else:
bt[iv] = bt1.variables[iv].values
else:
if np.size(bt1.variables[iv].shape) >1:
                    dummy = np.zeros([maxCol,bt1.variables[iv].shape[1]])*np.nan
dummy[:bt1.variables[iv].shape[0],:] = bt1.variables[iv].values
bt[iv] = np.hstack([bt[iv],dummy])
del dummy
else:
bt[iv] = np.hstack([bt[iv],bt1.variables[iv].values])
del bt1
for iv in bt.keys():
if ((np.size(bt[iv].shape) >1) and ('Time' not in iv)):
            bt[iv][bt['Time']==0] = np.nan
bt['Time'][bt['Time']!=bt['Time']]=0
return bt | f80892b79cbe12f00daa0918ccf1ac579c90193d | 13,467 |
def _cast_wf(wf):
"""Cast wf to a list of ints"""
if not isinstance(wf, list):
if str(type(wf)) == "<class 'numpy.ndarray'>":
# see https://stackoverflow.com/questions/2060628/reading-wav-files-in-python
wf = wf.tolist() # list(wf) does not convert int16 to int
else:
wf = list(wf) # fallback
if len(wf) > 0:
assert isinstance(wf[0], int), f"first element of wf wasn't an int, but a {type(wf[0])}"
return wf | cf2bf853b3ac021777a65d5323de6990d8dc4c5c | 13,468 |
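A small usage sketch showing both accepted input kinds; the int16 array stands in for audio samples read from a WAV file.

import numpy as np

print(_cast_wf(np.array([1, -2, 3], dtype=np.int16)))  # [1, -2, 3] (plain Python ints)
print(_cast_wf((4, 5, 6)))                             # [4, 5, 6]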
import hashlib
import os
def encoded_path(root, identifiers, extension = ".enc", depth = 3, digest = True):
"""generate a unique file-accessible path from the given list of identifiers
starting at the given root directory."""
    ident = "_".join(identifiers)
if digest:
        ident = hashlib.sha1(ident.encode("utf-8")).hexdigest()
ident = os.path.basename(ident)
tokens = []
for d in range(1, depth):
tokens.append(ident[0:d])
dir = os.path.join(root, *tokens)
verify_directory(dir)
return os.path.join(dir, ident + extension) | 16c9b810ca98e5fc2c48ce403f7af83e40066a08 | 13,469 |
def centralize_scene(points):
"""In-place centralize a whole scene"""
assert points.ndim == 2 and points.shape[1] >= 3
points[:, 0:2] -= points[:, 0:2].mean(0)
points[:, 2] -= points[:, 2].min(0)
return points | 3bdbbe5e3e9c1383852afd15910bb23a68e75506 | 13,470 |
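A tiny made-up "scene" of three points; x/y are centred on their mean and z is shifted so its minimum becomes 0. Note the array is modified in place.

import numpy as np

pts = np.array([[10.0, 20.0, 5.0],
                [12.0, 22.0, 6.0],
                [14.0, 24.0, 7.0]])
centralize_scene(pts)
print(pts)
# [[-2. -2.  0.]
#  [ 0.  0.  1.]
#  [ 2.  2.  2.]]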
def ms(val):
""" Turn a float value into milliseconds as an integer. """
return int(val * 1000) | 97f7d736ead998014a2026a430bf3f0c54042010 | 13,471 |
def render_doc(stig_rule, deployer_notes):
"""Generate documentation RST for each STIG configuration."""
template = JINJA_ENV.get_template('template_doc_rhel7.j2')
return template.render(
rule=stig_rule,
notes=deployer_notes
) | 97167c23b7b9550bac9f8722ac9f9baed21e060e | 13,472 |
import sed_eval.io
import sed_eval.sound_event
def official_evaluate(reference_csv_path, prediction_csv_path):
"""Evaluate metrics with official SED toolbox.
Args:
reference_csv_path: str
prediction_csv_path: str
"""
reference_event_list = sed_eval.io.load_event_list(reference_csv_path,
delimiter='\t', csv_header=False,
fields=['filename','onset','offset','event_label'])
estimated_event_list = sed_eval.io.load_event_list(prediction_csv_path,
delimiter='\t', csv_header=False,
fields=['filename','onset','offset','event_label'])
evaluated_event_labels = reference_event_list.unique_event_labels
files={}
for event in reference_event_list:
files[event['filename']] = event['filename']
evaluated_files = sorted(list(files.keys()))
segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(
event_label_list=evaluated_event_labels,
time_resolution=1.0
)
for file in evaluated_files:
reference_event_list_for_current_file = []
for event in reference_event_list:
if event['filename'] == file:
reference_event_list_for_current_file.append(event)
estimated_event_list_for_current_file = []
for event in estimated_event_list:
if event['filename'] == file:
estimated_event_list_for_current_file.append(event)
segment_based_metrics.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file
)
results = segment_based_metrics.results()
return results | 718da1a97cb73e382b45c43432f4b991eab93732 | 13,473 |
from pathlib import Path
import yaml
def get_notebooks():
"""Read `notebooks.yaml` info."""
path = Path("tutorials") / "notebooks.yaml"
with path.open() as fh:
return yaml.safe_load(fh) | 232ffc1820f29eddc9ded118b69ea8e6857b00c9 | 13,474 |
import numpy as np
def getSimData(startDate, endDate, region):
""" Get all boundary condition data needed for a simulation run
Args:
startDate (string): Start date DD.MM.YYYY
(start time is hard coded to 00:00)
endDate (string): End date DD.MM.YYYY
(end day is not in time range, so end date
should be end date + 1 day)
region (string): Location of simulation (determines climate / weather)
Supported regions:
East, West, South, North
Returns:
        int / np float (arrays): nSteps, time, SLP, HWP factor,
                                 Weather, SolarPosition
"""
data = getSimData_df(startDate, endDate, region)
return (data.time.size, data.time,
data.SLP,
data.HWPfactor.to_numpy(dtype=np.float32),
data.Weather,
data.SolarPosition
) | 65e7c3a18194eeac4781d57c412cbb079c1078ba | 13,475 |
def get_dag_path(pipeline, module=None):
"""
Gets the DAG path.
:@param pipeline: The Airflow Variable key that has the config.
:@type pipeline: str.
:@param module: The module that belongs to the pipeline.
:@type module: str.
:@return: The DAG path of the pipeline.
"""
if module is None:
module = pipeline
config = Variable.get(pipeline, deserialize_json=True)
return pp.join(config['dag_install_path'], '{}_dag.py'.format(module)) | 4b0a9e5d9692d3c2477e23cb4ba988c589fb9b96 | 13,476 |
def blow_up(polygon):
"""Takes a ``polygon`` as input and adds pixels to it according to the following rule. Consider the line between two
    adjacent pixels in the polygon (i.e., if connected via an edge). Then the method adds additional equidistant pixels
    lying on that line (non-integer coordinates are rounded to int), depending on the x- and y-distance of the pixels.
:param polygon: input polygon that should be blown up
:type polygon: Polygon
:return: blown up polygon
"""
res = Polygon()
for i in range(1, polygon.n_points, 1):
x1 = polygon.x_points[i - 1]
y1 = polygon.y_points[i - 1]
x2 = polygon.x_points[i]
y2 = polygon.y_points[i]
diff_x = abs(x2 - x1)
diff_y = abs(y2 - y1)
# if (x1,y1) = (x2, y2)
if max(diff_x, diff_y) < 1:
if i == polygon.n_points - 1:
res.add_point(x2, y2)
continue
res.add_point(x1, y1)
if diff_x >= diff_y:
for j in range(1, diff_x, 1):
if x1 < x2:
xn = x1 + j
else:
xn = x1 - j
yn = int(round(y1 + (xn - x1) * (y2 - y1) / (x2 - x1)))
res.add_point(xn, yn)
else:
for j in range(1, diff_y, 1):
if y1 < y2:
yn = y1 + j
else:
yn = y1 - j
xn = int(round(x1 + (yn - y1) * (x2 - x1) / (y2 - y1)))
res.add_point(xn, yn)
if i == polygon.n_points - 1:
res.add_point(x2, y2)
return res | c48005af11b8e1982aa45218159169acca0bd145 | 13,477 |
from typing import Optional
import time
def x_sogs_raw(
s: SigningKey,
B: PublicKey,
method: str,
full_path: str,
body: Optional[bytes] = None,
*,
b64_nonce: bool = True,
blinded: bool = False,
timestamp_off: int = 0,
):
"""
Calculates X-SOGS-* headers.
Returns 4 elements: the headers dict, the nonce bytes, timestamp int, and signature bytes.
Use x_sogs(...) instead if you don't need the nonce/timestamp/signature values.
"""
n = x_sogs_nonce()
ts = int(time.time()) + timestamp_off
if blinded:
a = s.to_curve25519_private_key().encode()
k = sodium.crypto_core_ed25519_scalar_reduce(
blake2b(sogs.crypto.server_pubkey_bytes, digest_size=64)
)
ka = sodium.crypto_core_ed25519_scalar_mul(k, a)
kA = sodium.crypto_scalarmult_ed25519_base_noclamp(ka)
pubkey = '15' + kA.hex()
else:
pubkey = '00' + s.verify_key.encode().hex()
to_sign = [B.encode(), n, str(ts).encode(), method.encode(), full_path.encode()]
if body:
to_sign.append(blake2b(body, digest_size=64))
if blinded:
H_rh = sha512(s.encode())[32:]
r = sodium.crypto_core_ed25519_scalar_reduce(sha512([H_rh, kA, *to_sign]))
sig_R = sodium.crypto_scalarmult_ed25519_base_noclamp(r)
HRAM = sodium.crypto_core_ed25519_scalar_reduce(sha512([sig_R, kA, *to_sign]))
sig_s = sodium.crypto_core_ed25519_scalar_add(
r, sodium.crypto_core_ed25519_scalar_mul(HRAM, ka)
)
sig = sig_R + sig_s
else:
sig = s.sign(b''.join(to_sign)).signature
h = {
'X-SOGS-Pubkey': pubkey,
'X-SOGS-Nonce': sogs.utils.encode_base64(n) if b64_nonce else n.hex(),
'X-SOGS-Timestamp': str(ts),
'X-SOGS-Signature': sogs.utils.encode_base64(sig),
}
return h, n, ts, sig | 6184f7f719c8d1e9e8e14fef56a65cd1d87f9f4f | 13,478 |
import json
def get_param(param, content, num=0):
"""
    Extract the value of a given parameter from the content.
    :param param: the parameter to extract from the API response
    :param content: the API response body
    :param num: when the response contains a list, the index of the element to use
    :return: the extracted parameter value, or None if it is still an unresolved variable reference
"""
param_val = None
if "." in param:
patt = param.split('.')
param_val = httprunner_extract(content, patt)
return param_val
else:
if isinstance(content, str):
try:
content = json.loads(content)
except:
content = ""
if isinstance(content, dict):
param_val = get_param_response(param, content, num)
if isinstance(content, list):
dict_data = {}
for i in range(len(content)):
try:
dict_data[str(i)] = eval(content[i])
except:
dict_data[str(i)] = content[i]
param_val = get_param_response(param, dict_data, num)
if param_val is None:
return param_val
else:
if "$" + param == param_val:
param_val = None
return param_val | d912c3ee22c223b4f9a91dc4817fc54a79139c20 | 13,479 |
def make_thebig_df_from_data(strat_df_list, strat_names):
"""Joins strategy data frames into a single df - **The Big DF** -
Signature of The Big DF:
df(strategy, sim_prefix, exec, node)[metrics]
"""
thebig_df = pd.concat(strat_df_list, axis=0, keys=strat_names)
thebig_df.index.set_names("strategy", level=0, inplace=True)
return thebig_df | 8ce67464a18fde5e81e0c8fd0c2a2d7ea016730e | 13,480 |
import datetime
def first_weekday_date(date):
"""
Filter - returns the date of the first weekday for the date
Usage (in template):
{{ some_date|first_weekday_date }}
"""
week_start = date - datetime.timedelta(days=date.weekday())
return week_start.date() | 8c7466040bff9e1924dbe365b92d796afe976fed | 13,481 |
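A small sanity check with a fixed date (2024-03-14 was a Thursday, so the Monday of that week is 2024-03-11):

import datetime

print(first_weekday_date(datetime.datetime(2024, 3, 14, 15, 30)))  # 2024-03-11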
def isLto():
"""*bool* = "--lto" """
return options.lto | ada2d688b1fe84fbcbb585c28e9d6251cce3dcd9 | 13,482 |
def runtime():
"""Get the CumulusCI runtime for the current working directory."""
init_logger()
return CliRuntime() | e4c3ed275f08cc7b982550714fa5c66c78ed1aa4 | 13,483 |
def order_json_objects(obj):
"""
Recusively orders all elemts in a Json object.
Source:
https://stackoverflow.com/questions/25851183/how-to-compare-two-json-objects-with-the-same-elements-in-a-different-order-equa
"""
if isinstance(obj, dict):
return sorted((k, order_json_objects(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(order_json_objects(x) for x in obj)
return obj | 5a0459d227b0a98c536290e3e72b76424d29820c | 13,484 |
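A short example showing the intended use: two JSON-like objects with the same elements in different order compare equal once normalised.

a = {"tags": ["b", "a"], "meta": {"id": 1}}
b = {"meta": {"id": 1}, "tags": ["a", "b"]}
print(order_json_objects(a) == order_json_objects(b))  # True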
def CalculatePEOEVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using partial charges and surface
area contributions.
chgBins=[-.3,-.25,-.20,-.15,-.10,-.05,0,.05,.10,.15,.20,.25,.30]
You can specify your own bins to compute some descriptors
Usage:
result=CalculatePEOEVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = MOE.PEOE_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["PEOEVSA" + str(i)] = round(j, 3)
return res | 2b51c65f70b93bee80be5eba740319ab53eeb992 | 13,485 |
def install_pyheif_from_pip() -> int:
"""
Install the python module pyheif from PIP.
Assumes required libraries already installed
:return: return code from pip
"""
print("Installing Python support for HEIF / HEIC...")
cmd = make_pip_command(
'install {} -U --disable-pip-version-check pyheif'.format(pip_user)
)
return popen_capture_output(cmd) | b4d9a2d8d08e9e6dde4ac828dd34dfc93dd6ca02 | 13,486 |
def adjust_mlb_names(mlb_id, fname, lname):
"""
Adjusts a prospect's first and last name (fname, lname) given their mlb.com player_id for better usage in matching to the professional_prospects table.
"""
player_mapper = {
}
qry = """SELECT wrong_name
, right_fname
, right_lname
FROM NSBL.name_mapper nm
;"""
res = db.query(qry)
for row in res:
wrong, right_fname, right_lname = row
player_mapper[wrong] = [right_fname, right_lname]
if mlb_id in player_mapper:
fname, lname = player_mapper.get(mlb_id)
return fname, lname
else:
return fname, lname | 2570cd47e3875e1c621f6b4c7c8659c6edca1d6e | 13,487 |
from typing import Callable
from typing import Any
def all_predicates(*predicates: Callable[[Any], bool]) -> Callable[[Any], bool]:
"""Takes a set of predicates and returns a function that takes an entity
and checks if it satisfies all the predicates.
>>> even_and_prime = all_predicates(is_even, is_prime)
>>> even_and_prime(2)
True
>>> even_and_prime(4)
False
>>> even_and_prime(3)
False
Added in version: 0.1.0
"""
return lambda entity: all((p(entity) for p in predicates)) | b531e848e3a24851c5bc756beae46bdd14311b1f | 13,488 |
import numpy as np
def centered_rand(l):
"""Sample from U(-l, l)"""
return l*(2.*np.random.rand()-1.) | f8cc1a8c6ad190b53061e1e83a410aa5cdcf26ed | 13,489 |
import torch
def compute_rays_length(rays_d):
"""Compute ray length.
Args:
rays_d: [R, 3] float tensor. Ray directions.
Returns:
rays_length: [R, 1] float tensor. Ray lengths.
"""
rays_length = torch.norm(rays_d, dim=-1, keepdim=True) # [N_rays, 1]
return rays_length | 9b43f9ea79708a690282a04eec65dbabf4a7ae36 | 13,490 |
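A minimal sketch with two hand-picked ray directions whose lengths are easy to verify.

import torch

rays_d = torch.tensor([[3.0, 4.0, 0.0],
                       [0.0, 0.0, 2.0]])
print(compute_rays_length(rays_d))
# tensor([[5.],
#         [2.]])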
import itertools
def _repeat_elements(arr, n):
"""
Repeats the elements int the input array, e.g.
[1, 2, 3] -> [1, 1, 1, 2, 2, 2, 3, 3, 3]
"""
ret = list(itertools.chain(*[list(itertools.repeat(elem, n)) for elem in arr]))
return ret | 95cf8ebb75505d2704cf957cdd709b8fa735973a | 13,491 |
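Example matching the docstring:

print(_repeat_elements([1, 2, 3], 3))   # [1, 1, 1, 2, 2, 2, 3, 3, 3]
print(_repeat_elements(["a", "b"], 2))  # ['a', 'a', 'b', 'b']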
def get_neighbors_table(embeddings, method, ntrees=None):
"""
This is a factory method for cosine distance nearest neighbor methods.
Args:
embeddings (ndarray): The embeddings to index
method (string): The nearest neighbor method to use
ntrees (int): number of trees for annoy
Returns:
Nearest neighbor table
"""
if method == "annoy":
if ntrees is None:
raise ImproperParameterSpecificationException("ntrees must be defined")
table = AnnoyNeighborsTable(embeddings, ntrees)
elif method == "brute":
table = BruteForceNeighborsTable(embeddings)
else:
raise MethodNotImplementedException("{} is not an implemented method".format(method))
return table | ee665e8332bf0f9b4c2e1ed38cf8b328a10cfc9b | 13,492 |
import tensorflow as tf
def _compute_positional_encoding(
attention_type,
position_encoding_layer,
hidden_size,
batch_size,
total_length,
seq_length,
clamp_length,
bi_data,
dtype=tf.float32):
"""Computes the relative position encoding.
Args:
attention_type: str, the attention type. Can be "uni" (directional) or
"bi" (directional).
position_encoding_layer: An instance of `RelativePositionEncoding`.
hidden_size: int, the hidden size.
batch_size: int, the batch size.
total_length: int, the sequence length added to the memory length.
seq_length: int, the length of each sequence.
clamp_length: int, clamp all relative distances larger than clamp_length. -1
means no clamping.
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
dtype: the dtype of the encoding.
Returns:
A Tensor, representing the position encoding.
"""
freq_seq = tf.range(0, hidden_size, 2.0)
if dtype is not None and dtype != tf.float32:
freq_seq = tf.cast(freq_seq, dtype=dtype)
if attention_type == "bi":
beg, end = total_length, -seq_length
elif attention_type == "uni":
beg, end = total_length, -1
else:
raise ValueError("Unknown `attention_type` {}.".format(attention_type))
if bi_data:
forward_position_sequence = tf.range(beg, end, -1.0)
backward_position_sequence = tf.range(-beg, -end, 1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(forward_position_sequence,
dtype=dtype)
backward_position_sequence = tf.cast(backward_position_sequence,
dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
backward_position_sequence = tf.clip_by_value(
backward_position_sequence,
-clamp_length,
clamp_length)
if batch_size is not None:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, batch_size // 2)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, batch_size // 2)
else:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, None)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, None)
relative_position_encoding = tf.concat(
[forward_positional_encoding, backward_positional_encoding], axis=0)
else:
forward_position_sequence = tf.range(beg, end, -1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(
forward_position_sequence, dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
relative_position_encoding = position_encoding_layer(
forward_position_sequence, batch_size)
return relative_position_encoding | fe7a87510745aa2c4b7b5f9e3225464d32e4a00e | 13,493 |
def _get_instance_id(instance_list, identity):
"""
Return instance UUID by name or ID, if found.
"""
for i in instance_list.items:
if identity in (i.properties.name, i.id):
return i.id
return None | f466e10028e9b84f23bd4ace1f02ad8f792517ee | 13,494 |
import os
import mysql.connector as mysql  # assumption: `mysql` refers to the mysql-connector-python driver
def DBConnect(dwhName=None):
"""
Parameters
----------
dwhName :
Default value = None)
Returns
-------
"""
conn = mysql.connect(host=os.getenv('DBT_MYSQL_HOST'), user="root",
database=dwhName, buffered=True)
cur = conn.cursor()
print("Successfully Connected!")
return conn, cur | abab780180b35b4e5062072c38a1dfa2eb23e666 | 13,495 |
def isPageWatched(user, trunk):
"""Is the page being watched by the user?"""
result = (models.Subscription.all().
filter('user =', user).
filter('trunk =', trunk).
filter('method !=', models.Subscription.METH_MEH))
return result.count(1) != 0 | 07335b32e11ef275a8c23281e295ed175b2b5854 | 13,496 |
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    if number == 0:
        return '0'  # the loop below would otherwise return an empty string
# TODO: Encode number in binary (base 2)
numbers = []
while number > 0:
remainder = number % base
if number < base:
remainder = number
number = number//base
numbers.append(value_digit[remainder])
numbers.reverse()
numbers_string = ''.join(numbers)
return numbers_string
# TODO: Encode number in hexadecimal (base 16)
# ...
# TODO: Encode number in any base (2 up to 36)
# ... | d430b98af798b298b855b0573bf63e5ff9e1eeb9 | 13,497 |
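A hedged usage sketch. The `value_digit` lookup used by encode() is defined elsewhere in the original project, so an assumed 0-9a-z mapping is supplied here for illustration only.

import string

value_digit = {i: d for i, d in enumerate(string.digits + string.ascii_lowercase)}  # assumed mapping

print(encode(255, 16))  # 'ff'
print(encode(10, 2))    # '1010'
print(encode(35, 36))   # 'z'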
def _parse_constants():
"""Read the code in St7API and parse out the constants."""
def is_int(x):
try:
_ = int(x)
return True
except ValueError:
return False
with open(St7API.__file__) as f_st7api:
current_comment = None
seen_comments = set()
f_stripped = (l.strip() for l in f_st7api)
for l in f_stripped:
is_comment_line = l.startswith("#")
is_blank_line = not l
is_constant_line = "=" in l and is_int(l.split("=")[1])
            if is_comment_line:
                if l in seen_comments:
                    raise ValueError(f"Duplicate comment {l}")
                seen_comments.add(l)
                current_comment = l
elif is_blank_line:
current_comment = None
elif is_constant_line:
if current_comment:
name, val = [x.strip() for x in l.split("=")]
yield current_comment, name, val | ca3c10b1eda6d46f86e21f318b960781b8875cc3 | 13,498 |
def verify_outcome(msg, prefix, lista):
"""
Compare message to list of claims: values.
:param prefix: prefix string
:param lista: list of claims=value
:return: list of possible strings
"""
assert msg.startswith(prefix)
qsl = ["{}={}".format(k, v[0]) for k, v in parse_qs(msg[len(prefix) :]).items()]
return set(qsl) == set(lista) | dd24e16c3029c911b939af4a50f4c7c7a71c8722 | 13,499 |