content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import json
import flask
# config, get_alert_by_id and update_security_alert are app-level helpers defined elsewhere
def update_alert():
""" Make Rest API call to security graph to update an alert """
if flask.request.method == 'POST':
flask.session.pop('UpdateAlertData', None)
result = flask.request.form
flask.session['VIEW_DATA'].clear()
alert_data = {_: result[_] for _ in result} # Iterate over html form POST from Graph.html
if alert_data.get('AlertId'): # the AlertId form field was not empty
alert_data['AlertId'] = alert_data.get('AlertId').strip(' ')
else:
flask.session['VIEW_DATA']['UpdateAlertError'] = "Please enter valid alert Id"
return flask.redirect(flask.url_for('homepage'))
alert_id = alert_data['AlertId']
old_alert = get_alert_by_id(alert_id) # store old alert before updating it
if not old_alert: # alert not found
flask.session['VIEW_DATA']['UpdateAlertError'] = "No alert matching this ID " + alert_id + " was found"
return flask.redirect(flask.url_for('homepage'))
else:
flask.session['VIEW_DATA']['OldAlert'] = old_alert
properties_to_update = {}
properties_to_update["assignedTo"] = flask.session['email']
if alert_data.get("SelectStatusToUpdate") != "Unknown":
properties_to_update["status"] = alert_data.get("SelectStatusToUpdate")
if alert_data.get("SelectFeedbackToUpdate") != "Unknown":
properties_to_update["feedback"] = alert_data.get("SelectFeedbackToUpdate")
if alert_data.get("Comments") != "":
comments = old_alert.get("comments")
new_comment = alert_data.get("Comments")
comments.append(new_comment)
properties_to_update["comments"] = comments
# include the required vendor information in the body of the PATCH
properties_to_update["vendorInformation"] = old_alert.get("vendorInformation")
# update the alert
update_security_alert(alert_id, properties_to_update)
# make another call to graph to get the updated alert
updated_alert = get_alert_by_id(alert_id)
# store the alert to be rendered in the table in Graph.html
flask.session['VIEW_DATA']['UpdateAlertResults'] = updated_alert
flask.session['VIEW_DATA']['UpdateQueryDetails'] = "REST query PATCH: '" \
+ config.SECURITYAPI_URL \
+ "alerts/" \
+ alert_id \
+ "'"
flask.session['VIEW_DATA']['UpdateQueryBody'] = "Request Body: " \
+ json.dumps(properties_to_update,
sort_keys=True,
indent=4,
separators=(',', ': '))
flask.session['UpdateAlertData'] = alert_data
return flask.redirect(flask.url_for('homepage')) | 52248bda1271fbd39ab056c5384f6318e30cc712 | 11,648 |
import numpy as np
def simulate_bet(odds, stake):
"""
Simulate the bet taking place assuming the odds accurately represent the probability of the event
:param odds: numeric: the odds given for the event
:param stake: numeric: the amount of money being staked
:return: decimal: the returns from the bet
"""
probability = odds_to_prob(odds)
if np.random.rand() <= probability:
return stake * (1 + odds)
else:
return 0 | 0a56bc4b9a3071cc777786a1a7cf8b1410e3f941 | 11,649 |
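A minimal usage sketch; `odds_to_prob` is not part of the snippet above, so the helper below assumes fractional odds where a winning bet pays stake * (1 + odds) and the implied probability is 1 / (1 + odds):
import numpy as np

def odds_to_prob(odds):
    # implied probability for fractional odds (assumed convention)
    return 1.0 / (1.0 + odds)

np.random.seed(0)
# 10,000 one-unit bets at even odds; the mean return should be close to the stake
returns = [simulate_bet(1.0, 1.0) for _ in range(10_000)]
print(sum(returns) / len(returns))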
from collections import OrderedDict
import torch.nn as nn
from torchvision.models import squeezenet1_1
def lrcn(num_classes, lrcn_time_steps, lstm_hidden_size=200, lstm_num_layers=2):
"""
Args:
num_classes (int):
Returns:
torch.nn.modules.module.Module
"""
class TimeDistributed(nn.Module):
def __init__(self, layer, time_steps):
super(TimeDistributed, self).__init__()
# self.layers = nn.ModuleList([layer for _ in range(time_steps)])
self.layers = nn.ModuleList([nn.Linear(10, 10) for _ in range(time_steps)])
def forward(self, x):
batch_size, time_steps, *_ = x.size()
# outputs = list()
for i, layer in enumerate(self.layers):
x = layer(x)
# output_t = layer(x[:, i])
# if i == 0:
# output = output_t.unsqueeze(1)
# else:
# output = torch.cat((output, output_t.unsqueeze(1)), 1)
# outputs.append(output_t)
# output = torch.stack(outputs, dim=1)
# return output
return x
class BiLSTMHidden2Dense(nn.Module):
def __init__(self):
super(BiLSTMHidden2Dense, self).__init__()
def forward(self, x):
lstm_output, (hn, cn) = x
lstm_last_hidden_state = hn[-2:].transpose(0, 1).contiguous().view(hn.size(1), -1)
return lstm_last_hidden_state
cnn_model = squeezenet1_1(pretrained=False, progress=True)
model = nn.Sequential(OrderedDict([
('timedistributed_cnn', TimeDistributed(nn.Conv2d(3, 60, (1, 1)), time_steps=lrcn_time_steps)),
# ('timedistributed_cnn', TimeDistributed(cnn_model, time_steps=lrcn_time_steps)),
# ('bidirectional_stacked_lstm', nn.LSTM(input_size=1000, hidden_size=lstm_hidden_size, num_layers=lstm_num_layers,
# batch_first=True, dropout=0.2, bidirectional=True)),
# ('hidden2dense', BiLSTMHidden2Dense()),
# ('dense', nn.Linear(in_features=2*lstm_hidden_size, out_features=lstm_hidden_size)),
# ('norm', nn.BatchNorm1d(num_features=lstm_hidden_size)),
# ('relu', nn.ReLU()),
# ('dropout', nn.Dropout(p=0.25)),
# ('last', nn.Linear(in_features=lstm_hidden_size, out_features=num_classes))
]))
return model | a17f58906f4d5b514e56f5cba22ac60bdf739b9c | 11,650 |
def index_document(connection, doc_id, content):
"""Build an inverted index entry for the document."""
words = tokenize(content)  # tokenize() is an application-level helper, not the stdlib tokenize module
pipe = connection.pipeline(True)
for word in words:
pipe.sadd('idx:' + word, doc_id)
return len(pipe.execute()) | 89572980c0bfadef9e1557b7e7831fc7aebe6716 | 11,651 |
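The snippet above calls a `tokenize()` helper rather than the stdlib `tokenize` module; a minimal sketch of such a helper (the regex and stop-word list are illustrative assumptions) might look like this:
import re

STOP_WORDS = {'the', 'a', 'an', 'and', 'of', 'to', 'in'}  # illustrative stop-word list

def tokenize(content):
    # lower-case words of two or more characters, minus stop words
    return set(re.findall(r"[a-z']{2,}", content.lower())) - STOP_WORDS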
import numpy as np
from ase.io import read  # assumed source of `read` for the POSCAR
def get_incar_magmoms(incarpath,poscarpath):
"""
Read in the magnetic moments in the INCAR
Args:
incarpath (string): path to INCAR
poscarpath (string): path to POSCAR
Returns:
mof_mag_list (list of floats): magnetic moments
"""
mof_mag_list = []
init_mof = read(poscarpath)
with open(incarpath,'r') as incarfile:
for line in incarfile:
line = line.strip()
if 'MAGMOM' in line:
mag_line = line.split('= ')[1:][0].split(' ')
for val in mag_line:
mag = float(val.split('*')[1])
num = int(val.split('*')[0])
mof_mag_list.extend([mag]*num)
if not bool(mof_mag_list):
mof_mag_list = np.zeros(len(init_mof))
if len(mof_mag_list) != len(init_mof):
raise ValueError('Error reading INCAR magnetic moments')
return mof_mag_list | 6b75f415e7128213bab63d251a3fb6feb7576656 | 11,652 |
import plistlib
def remove_report_from_plist(plist_file_obj, skip_handler):
"""
Parse the original plist content provided by the analyzer
and return new plist content with the reports that should be
skipped removed. If the removal fails for some reason, None
will be returned.
WARN !!!!
If the 'files' array in the plist is modified, all of the
diagnostic section (control, event ...) nodes should be
re-indexed to use the proper file array indexes!!!
"""
report_data = None
try:
report_data = parse_plist(plist_file_obj)
if not report_data:
return
except Exception as ex:
LOG.error("Plist parsing error")
LOG.error(ex)
return
file_ids_to_remove = []
try:
for i, f in enumerate(report_data['files']):
if skip_handler.should_skip(f):
file_ids_to_remove.append(i)
kept_diagnostics, kept_files = get_kept_report_data(report_data,
file_ids_to_remove)
report_data['diagnostics'] = kept_diagnostics
report_data['files'] = kept_files if kept_diagnostics else []
return plistlib.dumps(report_data)
except KeyError:
LOG.error("Failed to modify plist content, "
"keeping the original version")
return | fb14ccf1b0a1ad6b5e3b3e536e21386dbbcac84e | 11,654 |
def isTask(item): # pragma: no cover
"""Is the given item an OmniFocus task?"""
return item.isKindOfClass_(taskClass) | b0e2c813b29315e7b84cd9f2a4d211552dab9baf | 11,655 |
from typing import Tuple
import torch
from torch import Tensor
def cox_cc_loss(g_case: Tensor, g_control: Tensor, shrink : float = 0.,
clamp: Tuple[float, float] = (-3e+38, 80.)) -> Tensor:
"""Torch loss function for the Cox case-control models.
For only one control, see `cox_cc_loss_single_ctrl` instead.
Arguments:
g_case {torch.Tensor} -- Result of net(input_case)
g_control {torch.Tensor} -- Results of [net(input_ctrl1), net(input_ctrl2), ...]
Keyword Arguments:
shrink {float} -- Shrinkage that encourages the net to give g_case and g_control
values closer to zero (a regularizer in a sense). (default: {0.})
clamp {tuple} -- See code (default: {(-3e+38, 80.)})
Returns:
torch.Tensor -- Scalar loss.
"""
control_sum = 0.
shrink_control = 0.
if g_case.shape != g_control[0].shape:
raise ValueError(f"Need `g_case` and `g_control[0]` to have same shape. Got {g_case.shape}"+
f" and {g_control[0].shape}")
for ctr in g_control:
shrink_control += ctr.abs().mean()
ctr = ctr - g_case
ctr = torch.clamp(ctr, *clamp) # Kills grads for very bad cases (should instead cap grads!!!).
control_sum += torch.exp(ctr)
loss = torch.log(1. + control_sum)
shrink_zero = shrink * (g_case.abs().mean() + shrink_control) / len(g_control)
return torch.mean(loss) + shrink_zero.abs() | 1f528ff25984e0bb09bc49edf59d793a44281ddb | 11,657 |
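A quick smoke test of cox_cc_loss with dummy tensors (the shapes here are arbitrary; any batch size works as long as the case and control scores share a shape):
import torch

g_case = torch.randn(8, 1)
g_control = [torch.randn(8, 1) for _ in range(3)]  # three sampled controls per case
loss = cox_cc_loss(g_case, g_control, shrink=0.01)
print(loss.item())  # scalar loss value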
def vector_field(mesh, v):
"""
Returns an np.array with values specified by `v`, where `v` should
be an iterable of length 3, or a function that returns an iterable of
length 3 when given the coordinates of a cell of `mesh`.
"""
return field(mesh, v, dim=3) | 2d82fa86bc76367e2668b37815c068097d88c6fa | 11,658 |
def is_fav_recipe(request):
"""
Handles the requests from /ajax/is_fav_recipe/
Checks if a :model:`matega.recipe` is a saved recipe for a :model:'matega.user'
**Data**
Boolean if :model:`matega.recipe` is a saved recipe for :model:'matega.user'
"""
user_id = int(request.GET.get('user_id', None))
recipe_id = int(request.GET.get('recipe_id', None))
is_fav = False
user = User.objects.get(pk=user_id)
for rec in user.saved_recipes.values_list():
if rec[0] == recipe_id:
is_fav = True
data = {
'is_fav': is_fav
}
return JsonResponse(data) | f5ee3b21409f7a9ffe4ee427e19317f03b8db9c3 | 11,659 |
def people_interp():
"""
<enumeratedValueSet variable="People"> <value value="500"/> </enumeratedValueSet>
Integer between 1 and 500
"""
return '<enumeratedValueSet variable="People"> <value value="%s"/> </enumeratedValueSet>' | 2aba1330a774e022c280d2e50e3fb63631989a88 | 11,660 |
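The returned string is a %-style template; a caller would fill in the population value afterwards, e.g.:
xml_line = people_interp() % 250
print(xml_line)
# <enumeratedValueSet variable="People"> <value value="250"/> </enumeratedValueSet>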
def through_omas_s3(ods, method=['function', 'class_method'][1]):
"""
Test save and load S3
:param ods: ods
:return: ods
"""
filename = 'test.pkl'
if method == 'function':
save_omas_s3(ods, filename, user='omas_test')
ods1 = load_omas_s3(filename, user='omas_test')
else:
ods.save('s3', filename=filename, user='omas_test')
ods1 = ODS().load('s3', filename=filename, user='omas_test')
return ods1 | f89bb1c31a9bcbae869d07313ab10a7df658fd1c | 11,662 |
def read_packages(filename):
"""Return a python list of tuples (repository, branch), given a file
containing one package (and branch) per line.
Comments are excluded
"""
lines = load_order_file(filename)
packages = []
for line in lines:
if "," in line: # user specified a branch
path, branch = [k.strip() for k in line.split(",", 1)]
packages.append((path, branch))
else:
packages.append((line, "master"))
return packages | e73573003bd0388ed850fd2e996643a91199d30a | 11,663 |
import heapq
def find_min(x0, capacities):
"""
(int list, int list) --> (int list, int)
Find the schedule that minimizes the passenger wait time with the given capacity distribution
Uses a mixture of Local beam search and Genetic Algorithm
Returns the min result
"""
scores_and_schedules = []
# Generate 199 neighbouring schedules using the input schedule x0
init_neighbours = find_neighbours(199, 10, x0)
min_score = all_trains(x0, capacities, passengers)
min_sched = x0
heapq.heappush(scores_and_schedules,(min_score, x0))
# Add them all to the list, as well as the input schedule
for i in init_neighbours:
score = all_trains(i, capacities, passengers)
heapq.heappush(scores_and_schedules,(score, i))
if score < min_score:
min_score, min_sched = score, i
local_min_counter = 0
# Perform the genetic algorithm for optimization
while local_min_counter < 500:
scores_and_schedules = best_n(scores_and_schedules, capacities, 5)
if scores_and_schedules[0][0] < min_score:
min_score, min_sched = scores_and_schedules[0]
local_min_counter = 0
else:
local_min_counter += 1
return min_sched, min_score | 016cdd310f4b59e61349edd94b3b7cc387c3c7c1 | 11,664 |
import semver
def versioning(version: str) -> str:
"""
version to specification
Author: Huan <[email protected]> (https://github.com/huan)
X.Y.Z -> X.Y.devZ
"""
sem_ver = semver.parse(version)
major = sem_ver['major']
minor = sem_ver['minor']
patch = str(sem_ver['patch'])
if minor % 2:
patch = 'dev' + patch
fin_ver = '%d.%d.%s' % (
major,
minor,
patch,
)
return fin_ver | bfef27712b8595f52314f300743012270a42e64f | 11,665 |
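Example of the odd-minor 'dev' convention (assuming python-semver 2.x, where semver.parse returns a dict):
print(versioning('0.18.3'))  # even minor -> '0.18.3'
print(versioning('0.19.3'))  # odd minor  -> '0.19.dev3'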
def device_create_from_symmetric_key(transportType, deviceId, hostname, symmetricKey): # noqa: E501
"""Create a device client from a symmetric key
# noqa: E501
:param transportType: Transport to use
:type transportType: str
:param deviceId:
:type deviceId: str
:param hostname: name of the host to connect to
:type hostname: str
:param symmetricKey: key to use for connection
:type symmetricKey: str
:rtype: ConnectResponse
"""
return "do some magic!" | 15ac85df5a41f88044cf449f0f9d99bfcd72d570 | 11,667 |
import pandas as pd
def create_with_index(data, columns):
"""
Create a new pd.DataFrame indexed by its first column (columns[0] is expected to be "Index")
"""
to_df = {columns[0]: [x for x in range(1, len(data) + 1)], columns[1]: data}
data_frame = pd.DataFrame(to_df)
data_frame.set_index("Index", inplace=True)
return data_frame | f9bb854af5d77f4355d64c8c56a9fdda7bd2cf93 | 11,668 |
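Usage sketch; note that the first column name must be "Index" because set_index("Index") is hard-coded:
df = create_with_index(["a", "b", "c"], ["Index", "Value"])
print(df)
#       Value
# Index
# 1         a
# 2         b
# 3         c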
import numpy as np
def random_aes_key(blocksize=16):
"""Set 2 - Challenge 11"""
return afb(np.random.bytes(blocksize)) | f5bfad117886e51bbb810274c62e44e89ec2c79a | 11,669 |
import json
import torch
import torch.nn as nn
def create_and_load(directory: str,
name: str,
new_name: str = None) -> nn.Module:
"""Instantiate an unknown function (uf) required
by the higher-order functions with a trained neural network
Args:
directory: directory to the saved weights of an NN
name: name of the unknown function
new_name: the new name of the unknown function
"""
if new_name is None:
new_name = name
with open('{}/{}.json'.format(directory, name)) as json_data:
params_dict = json.load(json_data)
params_dict['name'] = new_name
if params_dict['output_activation'] == 'None':
params_dict['output_activation'] = None
elif params_dict['output_activation'] == 'sigmoid':
params_dict['output_activation'] = torch.sigmoid
elif params_dict['output_activation'] == 'softmax':
params_dict['output_activation'] = nn.Softmax(dim=1)
else:
raise NotImplementedError()
new_fn, _ = get_nn_from_params_dict(params_dict)
new_fn.load('{}/{}.pth'.format(directory, name))
new_fn.eval()
return new_fn | 1ebf471fb624918b52953748a4f275b22aeaba1a | 11,670 |
def select_region_climatedata(gcm_name, rcp, main_glac_rgi):
"""
Get the regional temperature and precipitation for a given dataset.
Extracts all nearest-neighbor temperature and precipitation data for a given set of glaciers. The mean temperature
and precipitation of the group of glaciers is returned. If two glaciers share the same temp/prec data, that data
is only used once in the mean calculations. Additionally, one would not expect different GCMs to be similar
because they all have different resolutions, so these mean calculations will involve different numbers of pixels.
Parameters
----------
gcm_name : str
GCM name
rcp : str
rcp scenario (ex. rcp26)
main_glac_rgi : pd.DataFrame
glacier dataset used to select the nearest neighbor climate data
"""
# Date tables
print('select_region_climatedata fxn dates supplied manually')
dates_table_ref = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
dates_table = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
# Load gcm lat/lons
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp)
# Select lat/lon from GCM
ds_elev = xr.open_dataset(gcm.fx_fp + gcm.elev_fn)
gcm_lat_values_all = ds_elev.lat.values
gcm_lon_values_all = ds_elev.lon.values
ds_elev.close()
# Lat/lon dictionary to convert
gcm_lat_dict = dict(zip(range(gcm_lat_values_all.shape[0]), list(gcm_lat_values_all)))
gcm_lon_dict = dict(zip(range(gcm_lon_values_all.shape[0]), list(gcm_lon_values_all)))
# Find nearest neighbors for glaciers that have pixels
latlon_nearidx = pd.DataFrame(np.zeros((main_glac_rgi.shape[0],2)), columns=['CenLat','CenLon'])
latlon_nearidx.iloc[:,0] = (np.abs(main_glac_rgi.CenLat.values[:,np.newaxis] - gcm_lat_values_all).argmin(axis=1))
latlon_nearidx.iloc[:,1] = (np.abs(main_glac_rgi.CenLon.values[:,np.newaxis] - gcm_lon_values_all).argmin(axis=1))
latlon_nearidx = latlon_nearidx.drop_duplicates().sort_values(['CenLat', 'CenLon'])
latlon_nearidx.reset_index(drop=True, inplace=True)
latlon_reg = latlon_nearidx.copy()
latlon_reg.CenLat.replace(gcm_lat_dict, inplace=True)
latlon_reg.CenLon.replace(gcm_lon_dict, inplace=True)
# ===== LOAD CLIMATE DATA =====
# Reference climate data
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn, latlon_reg,
dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn, latlon_reg,
dates_table_ref)
# ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, latlon_reg)
# GCM climate data
gcm_temp_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, latlon_reg, dates_table)
gcm_prec_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, latlon_reg, dates_table)
# gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, latlon_reg)
# GCM subset to agree with reference time period to calculate bias corrections
gcm_subset_idx_start = np.where(dates_table.date.values == dates_table_ref.date.values[0])[0][0]
gcm_subset_idx_end = np.where(dates_table.date.values == dates_table_ref.date.values[-1])[0][0]
gcm_temp = gcm_temp_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
gcm_prec = gcm_prec_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
## ===== BIAS ADJUSTMENTS =====
# OPTION 2: Adjust temp and prec according to Huss and Hock (2015) accounts for means and interannual variability
if input.option_bias_adjustment == 2:
# TEMPERATURE BIAS CORRECTIONS
# Mean monthly temperature
ref_temp_monthly_avg = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_temp_monthly_avg = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
# Monthly bias adjustment
gcm_temp_monthly_adj = ref_temp_monthly_avg - gcm_temp_monthly_avg
# Monthly temperature bias adjusted according to monthly average
t_mt = gcm_temp_all + np.tile(gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Mean monthly temperature bias adjusted according to monthly average
t_m25avg = np.tile(gcm_temp_monthly_avg + gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Calculate monthly standard deviation of temperature
ref_temp_monthly_std = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
gcm_temp_monthly_std = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
variability_monthly_std = ref_temp_monthly_std / gcm_temp_monthly_std
# Bias adjusted temperature accounting for monthly mean and variability
gcm_temp_bias_adj = t_m25avg + (t_mt - t_m25avg) * np.tile(variability_monthly_std, int(gcm_temp_all.shape[1]/12))
# PRECIPITATION BIAS CORRECTIONS
# Calculate monthly mean precipitation
ref_prec_monthly_avg = (ref_prec.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_prec_monthly_avg = (gcm_prec.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
bias_adj_prec = ref_prec_monthly_avg / gcm_prec_monthly_avg
# Bias adjusted precipitation accounting for differences in monthly mean
gcm_prec_bias_adj = gcm_prec_all * np.tile(bias_adj_prec, int(gcm_temp_all.shape[1]/12))
# Regional means
reg_mean_temp_biasadj = gcm_temp_bias_adj.mean(axis=0)
reg_mean_prec_biasadj = gcm_prec_bias_adj.mean(axis=0)
return reg_mean_temp_biasadj, reg_mean_prec_biasadj | 8e2c4bf8a942b4a21d5549e9af87bacb75f92f26 | 11,671 |
import numpy as np
def get_patch_boundaries(mask_slice, eps=2):
"""
Computes coordinates of SINGLE patch on the slice. Behaves incorrectly in the case of multiple tumors on the slice.
:mask_slice: 2D ndarray, contains mask with <0, 1, 2> values of pixels
:eps: int, number of additional pixels we extract around the actual mask coordinates
:return: `x_min`, `x_max`, `y_min`, `y_max`
"""
# check if we work with mask_slice that contains at least one non-zero pixel
if np.sum(mask_slice[:, :]) <= 0:
raise ValueError("Slice does not contains any tumors.")
# smallest row index whose row contains a nonzero pixel
x_min = None
for x in range(mask_slice.shape[0]):
if np.sum(mask_slice[x, :]) > 0:
# get first from the left index of nonzero 1D slice and break
x_min = x
break
x_max = None
for x in range(mask_slice.shape[0] - 1, -1, -1):
if np.sum(mask_slice[x, :]) > 0:
# get the first from the right index of nonzero 1D slice and break
x_max = x
break
y_min = None
for y in range(mask_slice.shape[1]):
if np.sum(mask_slice[:, y]) > 0:
# get the first from the bottom index of nonzero 1D slice and break
y_min = y
break
y_max = None
for y in range(mask_slice.shape[1] - 1, -1, -1):
if np.sum(mask_slice[:, y]) > 0:
# get the first from the top index of nonzero 1D slice and break
y_max = y
break
# apply `eps` parameter to the actual `min` and `max` values
x_min = max(x_min - eps, 0)
x_max = min(x_max + eps, mask_slice.shape[0] - 1)
y_min = max(y_min - eps, 0)
y_max = min(y_max + eps, mask_slice.shape[1] - 1)
return x_min, x_max, y_min, y_max | 0af985273b3e509bf9ee2580a64c8ddd6392d5a7 | 11,672 |
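A toy example on an 8x8 mask with a 2x2 nonzero block (eps widens the box by 2 pixels on each side, clipped to the slice bounds):
import numpy as np

mask = np.zeros((8, 8), dtype=int)
mask[3:5, 4:6] = 1
print(get_patch_boundaries(mask))  # (1, 6, 2, 7)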
from typing import Union
from typing import List
def get_sqrt_ggn_extension(
subsampling: Union[None, List[int]], mc_samples: int
) -> Union[SqrtGGNExact, SqrtGGNMC]:
"""Instantiate ``SqrtGGN{Exact, MC}`` extension.
Args:
subsampling: Indices of active samples.
mc_samples: Number of MC-samples to approximate the loss Hessian. ``0``
uses the exact loss Hessian.
Returns:
Instantiated SqrtGGN extension.
"""
return (
SqrtGGNExact(subsampling=subsampling)
if mc_samples == 0
else SqrtGGNMC(subsampling=subsampling, mc_samples=mc_samples)
) | 47f074a387d0a6182d93061cbaaf4fc397be26c0 | 11,673 |
import cv2
def gray_to_rgb(image):
"""convert cv2 image from GRAYSCALE to RGB
:param image: the image to be converted
:type image: cv2 image
:return: converted image
:rtype: cv2 image
"""
return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) | feede538a2822d6d7f36bb6bbe40c845ca55808d | 11,674 |
def win_to_cygwin(winpath):
"""run `cygpath winpath` to get cygwin path"""
x = detail.command.run(['cygpath', winpath])
assert(len(x) == 1)
return x[0] | 1f941628fae51cfca7621c62c454e80d984f7019 | 11,675 |
def nucleotide_composition_to_letter(composition):
"""
Converts dictionary of {nucleotide letter: proportion} pairs
to IUPAC degenerate DNA letter.
Usage:
c = {'A': 1}
print(nucleotide_composition_to_letter(c)) --> 'A'
c = dict(zip('ACGT', [1, 1, 1, 1]))
print(nucleotide_composition_to_letter(c)) --> 'N'
c = dict(zip('ACGT', [1, 1, 2, 1]))
print(nucleotide_composition_to_letter(c)) --> 'n'
"""
nonzero_nucleotides = ''.join(sorted([n
for n, v in composition.items()
if v > 0]))
nonzero_proportions = [composition[n] for n in nonzero_nucleotides]
equimolar = min(nonzero_proportions) == max(nonzero_proportions)
letter = DEGENERATE_NUCLEOTIDE_CODE_REVERSED.get(nonzero_nucleotides,
DEFAULT_NUCLEOTIDE_LABEL)
if equimolar:
return letter
return letter.lower() | 2ad080d3a04cfc754f46d490a272362cadecbfd2 | 11,676 |
def forcast(doc):
"""
:param: doc object
:returns: tuple of (grade level, reading age, monosyllable count)
"""
# filter out non-alphabetic single-character tokens; building a new list
# avoids mutating word_tokens while iterating over it
word_tokens = [w for w in doc.word_tokens if w.isalpha() or len(w) >= 2]
monosyllables = 0
for w in word_tokens[10:159]:
if syllable_count(w) < 2:
monosyllables += 1
gl = 20 - (monosyllables/10)
ra = 25 - (monosyllables/10)
return (gl, ra, monosyllables) | e71debbdaf057c61eaa620419b0357e603868989 | 11,677 |
import geocoder
def convert_coordinates_to_country(deg_x: float, deg_y: float) -> str:
""" returns country name """
return geocoder.osm([deg_x, deg_y], method="reverse").country | 5dca9d54bfa154a33a94550f983a3e9457cf2d52 | 11,678 |
def fixture_items(test_list):
"""Returns an instance of ItemCollection for testing"""
return test_list.get_items(query=QUERY) | 6351ffdb9ce8a65d7a08d55c6cfa9db8ef4aa978 | 11,679 |
import praw
def get_daily_discussion_post(subreddit_instance: praw.models.Subreddit):
"""Try to get the daily discussions post for a subreddit.
Args:
subreddit_instance
Returns:
The submission object for the discussion post, or None if it couldn't be found.
Works by searching the stickied posts of the subreddit for a post with 'daily discussion' in the title.
"""
print('Searching stickied posts for daily discussion posts..')
for sticky_num in [1, 2]:
discussion_post = subreddit_instance.sticky(number=sticky_num)
if 'daily discussion' in discussion_post.title.lower():
print(f'Got daily discussion post, title {discussion_post.title}')
return discussion_post
print("Couldn't find daily discussion post!")
return None | 841612a8b2d2fa7a8a74f081e360a69884b20925 | 11,681 |
import numpy as np
from sklearn.model_selection import train_test_split
def metric_by_training_size(X, y, classifier_list, training_set, metric, as_percentage=True):
"""
This is a refactoring of code to repeat metrics for best-fitted models by training-set percentage size.
i.e.: Find accuracy rating for multiple training-test splits for svm, random forests, and naive bayes and return an
np.ndarray
:param X:
:param y:
:param classifier_list:
:param training_set:
:param metric:
:param as_percentage:
:return: np.ndarray
"""
metric_array = np.zeros((len(training_set), len(classifier_list)))
for row_num, training_size in enumerate(training_set):
X_train_iter, X_test_iter, y_train_iter, y_test_iter = train_test_split(X, y,
test_size=1 - training_size,
random_state=0)
metric_list = []
for classifier in classifier_list:
y_pred = classifier.fit(X_train_iter, y_train_iter).predict(X_test_iter)
metric_list.append(metric(y_test_iter, y_pred))
metric_array[row_num] = metric_list
metric_array = metric_array.transpose()
return 100 * metric_array if as_percentage else metric_array | 3d918989b28db47479da3479b1804b7a502b9ce0 | 11,682 |
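A usage sketch with scikit-learn estimators and accuracy_score (the dataset, estimators and metric here are illustrative assumptions; any classifiers with fit/predict work):
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
acc = metric_by_training_size(X, y,
                              classifier_list=[SVC(), GaussianNB()],
                              training_set=[0.5, 0.7, 0.9],
                              metric=accuracy_score)
print(acc.shape)  # (2, 3): one row per classifier, one column per training size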
def remove_namespace(tag, ns):
"""Remove namespace from xml tag."""
for n in ns.values():
tag = tag.replace('{' + n + '}', '')
return tag | d4837a3d906baf8e439806ccfea76284e8fd9b87 | 11,684 |
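For example:
ns = {'atom': 'http://www.w3.org/2005/Atom'}
print(remove_namespace('{http://www.w3.org/2005/Atom}entry', ns))  # entry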
import numpy as np
def false_prediction_pairs(y_pred, y_true):
"""
Prints pairs of predicted and true classes that differ.
Returns
-------
false_pairs
The pairs of classes that differ.
counts
Number of occurrences of the pairs.
"""
cond = y_pred != y_true
false_preds = np.stack([y_true[cond], y_pred[cond]], axis=-1)
false_pairs, counts = np.unique(false_preds, axis=0, return_counts=True)
return false_pairs, counts | 460acb967b95d9e4c03e557e6bd1ede2dd7d0902 | 11,685 |
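Small worked example:
import numpy as np

y_true = np.array([0, 1, 1, 2, 2, 2])
y_pred = np.array([0, 2, 2, 2, 1, 1])
pairs, counts = false_prediction_pairs(y_pred, y_true)
print(pairs)   # [[1 2]
               #  [2 1]]
print(counts)  # [2 2]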
def draw_boxes_and_labels_to_image_multi_classes(image, classes, coords, scores=None, classes_name=None, classes_colors=None, font_color=[0, 0, 255]):
"""
Draw bboxes and class labels on image. Return or save the image with bboxes
Parameters
-----------
image : numpy.array
The RGB image [height, width, channel].
classes : list of list of int
A list of class-ID lists, one list per bounding box.
coords : list of list of int
A list of coordinate lists.
- Each should be [x, y, x2, y2]
scores : list of float
A list of score (float). (Optional)
classes_name : list of str
For converting ID to string on image.
classes_colors : list of color
A list of color [ [r,g,b], ...].
font_color : color
Font color [r, g, b]
Returns
-------
numpy.array
The output image.
"""
image = image.copy()
imh, imw = image.shape[0:2]
thick = int((imh + imw) // 500)  # line thickness
for i, _v in enumerate(coords):
x, y, x2, y2 = np.asarray(coords[i], np.int32)
bbox_color = [0, 255, 0] if classes_colors is None else classes_colors[classes[i]]
cv2.rectangle(image, (x, y), (x2, y2), bbox_color, thick)
if classes is not None:
text = []
for c in classes[i]:
class_text = classes_name[c] if classes_name is not None else str(c)
# score_text = " %.2f" % (scores[i]) if scores is not None else ''
t = class_text #+ score_text
text.append(t)
text = '\n'.join(text)
score_text = " %.2f" % (scores[i]) if scores is not None else ''
text += score_text
font_scale = 1.0e-3 * imh
# text_size, _ = cv2.getTextSize(text, 0, font_scale, int(thick / 2) + 1)
# cv2.rectangle(image, (x, y), (x+text_size[0], y-text_size[1]), bbox_color, -1)
# cv2.putText(image, text, (x, y), 0, font_scale, font_color, int(thick / 3) + 1)
image = im_tool.put_text(image, text, (x, y), font_scale*32, font_color, bbox_color)
return image | 5a11dd98019e5096c83137c43457a312598e2be8 | 11,686 |
def simulate():
"""
Runs a simulation given a context, a simulator, a trace, and a depth
Method PUT
"""
context = request.get_json()['context']
simulator = request.get_json()['simulator']
trace = request.get_json()['trace']
depth = request.get_json()['depth']
if context is None or simulator is None or trace is None or depth is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
sim = contexts[context]['simulators'][simulator]
tra = contexts[context]['traces'][trace]
dep = int(depth)
assert ctx is not None
assert sim is not None
assert tra is not None
sim.simulate(tra, dep)
return {'result': 'ok'}, 200 | d905eb9fa34454588d4d1ab95794a6b9c41074ac | 11,688 |
import chex
from chex import Array
def soft_expected_backup_rl(
next_q: Array,
next_pol: Array,
next_log_pol: Array,
rew: Array,
done: Array,
discount: float,
er_coef: float,
) -> Array:
r"""Do soft expected Bellman backup :math:`r + \gamma P \langle \pi, q - \tau * \log{\pi}\rangle`.
Args:
next_q (Array): ? x dA q-values.
next_pol (Array): ? x dA policy.
next_log_pol (Array): ? x dA log-policy.
rew (Array): ? x 1 rewards.
done (Array): ? x 1 done flags.
discount (float): Discount factor.
er_coef (float): Entropy coefficient.
Returns:
q (Array): ? x 1 q-values.
"""
chex.assert_rank([next_q, next_pol], 2)
next_v = next_pol * (next_q - er_coef * next_log_pol)
next_v = next_v.sum(axis=-1, keepdims=True) # ? x 1
q = rew + discount * next_v * (~done)
return q | fb1fed9946e05e4ad464f54c559be5e32c1f2e8e | 11,689 |
import pandas as pd
def generate_bias(series: pd.Series, effect_size: float = 1, power: float = 1) -> pd.Series:
"""
Calculate bias for sensitive attribute
Parameters
----------
series : pd.Series
sensitive attribute for which the bias is calculated.
effect_size : float, optional
Size of the bias for 1 std from the mean. The default is 1.
power : float, optional
power=1: linear bias, power=2: quadratic bias, etc. The default is 1.
Returns
-------
pd.Series
Standardized bias values aligned with `series`, scaled by `effect_size`.
"""
bias = series.sub(series.mean()).pow(power)
bias = (bias - bias.mean())/bias.std() # Make the bias neutral
return bias * effect_size | a23a201dfeac8ed25cb923080f9c968d1a8a6583 | 11,692 |
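For example, a linear bias over a numeric column (values are standardized and then scaled by effect_size):
import pandas as pd

age = pd.Series([20, 30, 40, 50, 60])
print(generate_bias(age, effect_size=2.0))
# approximately [-2.53, -1.26, 0.00, 1.26, 2.53]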
def get_thellier_gui_meas_mapping(input_df, output=2):
"""
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui)
"""
if int(output) == 2:
thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy()
if 'treat_step_num' in input_df.columns:
thellier_gui_meas3_2_meas2_map.update(
{'treat_step_num': 'measurement_number'})
thellier_gui_meas3_2_meas2_map.pop('measurement')
return thellier_gui_meas3_2_meas2_map
# 2 --> 3
else:
thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy()
if 'measurement' in input_df.columns:
thellier_gui_meas2_2_meas3_map.pop('measurement_number')
try:
res = int(input_df.iloc[0]['measurement_number'])
if res < 100:
thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num'
except ValueError as ex:
pass
return thellier_gui_meas2_2_meas3_map | ba32104db56cfdb450015a0a43f0717263d5ea44 | 11,694 |
import time
import uuid as unique_id
def new_unsigned_vaccination_credential(
passenger_first_name: str,
passenger_last_name: str,
passenger_id_number: str,
passenger_date_of_birth: str,
vaccination_disease: str,
vaccination_vaccine: str,
vaccination_product: str,
vaccination_auth_holder: str,
vaccination_dose_number: str,
vaccination_total_doses: str,
vaccination_batch: str,
vaccination_date: str,
vaccination_next_date: str,
vaccination_center: str,
vaccination_professional: str,
vaccination_country: str,
issuer_did: str
):
"""Create a Claims object for a Verifiable Credentia in JWT format.
The returned object has just the plain claims object, and has to be
signed later.
"""
# Generate a random UUID, not related to anything in the credential
# This is important for privacy reasons to avoid possibility of
# correlation if the UUID is used for Revocation Lists in a blockchain
uid = unique_id.uuid4().hex
# Current time and expiration
now = int(time.time())
exp = now + 365*24*60*60 # The token will expire in 365 days
# Generate a template Verifiable Credential
credential = {
"iss": issuer_did,
"sub": passenger_id_number,
"iat": now,
"exp": exp,
"uuid": uid,
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://alastria.github.io/identity/credentials/v1",
"https://safeisland.org/.well-known/w3c-covid-test/v1"
],
"type": [
"VerifiableCredential",
"AlastriaVerifiableCredential",
"SafeIslandVaccinationCredential"
],
"credentialSchema": {
"id": "vaccinationCredential",
"type": "JsonSchemaValidator2018"
},
"credentialSubject": {
"vaccinationCredential": {
"patient": {
"name": passenger_last_name.upper() + "/" + passenger_first_name.upper(),
"idnumber": passenger_id_number,
"dob": passenger_date_of_birth
},
"vaccination": {
"disease": vaccination_disease,
"vaccine": vaccination_vaccine,
"product": vaccination_product,
"auth_holder": vaccination_auth_holder,
"dose_number": vaccination_dose_number,
"total_doses": vaccination_total_doses,
"batch": vaccination_batch,
"date": vaccination_date,
"next_date": vaccination_next_date,
"center": vaccination_center,
"professional": vaccination_professional,
"country": vaccination_country,
},
"comments": "These are some comments"
},
"issuedAt": ["redt.alastria"],
"levelOfAssurance": 2
}
}
}
return credential | e3a072e1d16a3520a5bad92e692e5f4de72e8b1d | 11,695 |
import numpy as np
from scipy import integrate
def calc_pk_integrated_intensities(p,x,pktype,num_pks):
"""
Calculates the area under the curve (integrated intensities) for fit peaks
Required Arguments:
p -- (m x u + v) peak parameters for number of peaks, m is the number of
parameters per peak ("gaussian" and "lorentzian" - 3, "pvoigt" - 4, "split_pvoigt"
- 5), v is the number of parameters for chosen bgtype
x -- (n) ndarray of coordinate positions
pktype -- string, type of analytic function that will be used to fit the data,
current options are "gaussian","lorentzian","pvoigt" (pseudo-Voigt), and
"split_pvoigt" (split pseudo-Voigt)
num_pks -- integer 'u' indicating the number of pks, must match length of p
Outputs:
ints -- (m) integrated intensities for m fit peaks
"""
ints=np.zeros(num_pks)
if pktype == 'gaussian' or pktype == 'lorentzian':
p_fit=np.reshape(p[:3*num_pks],[num_pks,3])
elif pktype == 'pvoigt':
p_fit=np.reshape(p[:4*num_pks],[num_pks,4])
elif pktype == 'split_pvoigt':
p_fit=np.reshape(p[:6*num_pks],[num_pks,6])
for ii in np.arange(num_pks):
if pktype == 'gaussian':
ints[ii]=integrate.simps(pkfuncs._gaussian1d_no_bg(p_fit[ii],x),x)
elif pktype == 'lorentzian':
ints[ii]=integrate.simps(pkfuncs._lorentzian1d_no_bg(p_fit[ii],x),x)
elif pktype == 'pvoigt':
ints[ii]=integrate.simps(pkfuncs._pvoigt1d_no_bg(p_fit[ii],x),x)
elif pktype == 'split_pvoigt':
ints[ii]=integrate.simps(pkfuncs._split_pvoigt1d_no_bg(p_fit[ii],x),x)
return ints | d3ab50d6e6e5d2187917e06a8258a46ac5d4db18 | 11,696 |
import numpy as np
from warnings import warn
def read_fid_ntraces(filename, shape=None, torder='flat', as_2d=False,
read_blockhead=False):
"""
Read an Agilent/Varian binary (fid) file possibly having multiple
traces per block.
Parameters
----------
filename : str
Filename of Agilent/Varian binary file (fid) to read.
shape : tuple of ints, optional
Shape of the binary data. If not provided data is returned as a 2D
array. Required if more than one trace per block (non-standard).
torder : {'f', 'n', 'o'}
Trace order. See :py:func:`read` for details.
as_2d : bool, optional
True to return the data as a 2D array, ignoring the shape and torder
parameters.
read_blockhead : bool, optional
True to read the Agilent/Varian blockheaders(s) into the returned
dictionary. False ignores them.
Returns
-------
dic : dict
Dictionary of Agilent/Varian binary file parameters.
data : array_like
Low memory object which can access NMR data on demand.
See Also
--------
read_fid : Read an Agilent/Varian binary file with one trace per block.
read_fid_lowmem : Read an Agilent/Varian binary file with one trace per
block using minimal amounts of memory.
"""
# open the file
f = open(filename, 'rb')
# read the fileheader
dic = fileheader2dic(get_fileheader(f))
# data parameters
dt = find_dtype(dic)
nblocks = dic["nblocks"]
pts = dic["np"]
nbheaders = dic["nbheaders"]
ntraces = dic["ntraces"]
# read the data
if read_blockhead:
bdic, data = get_nblocks_ntraces(f, nblocks, ntraces, pts,
nbheaders, dt, read_blockhead)
dic["blockheader"] = bdic
else:
data = get_nblocks_ntraces(f, nblocks, ntraces, pts, nbheaders, dt,
read_blockhead)
f.close()
# uninterleave the real and imaginary data
data = uninterleave_data(data)
# if 2D array requested, return unshaped
if as_2d:
return dic, data
# check for 1D
if data.shape[0] == 1:
return dic, np.squeeze(data)
# try to reshape
if shape is None:
warn("unknown shape, returning unshaped data")
return dic, data
# reorder 3D/4D data
if len(shape) >= 3:
return dic, reorder_data(data, shape, torder)
try:
data = data.reshape(shape)
except ValueError:
warn(str(data.shape) + "cannot be shaped into" + str(shape))
return dic, data
return dic, data | d82f341326d089dad9def8a95b4233cf4dde607d | 11,697 |
import typing
async def async_get_erc20_decimals(
token: spec.ERC20Reference,
block: typing.Optional[spec.BlockNumberReference] = None,
**rpc_kwargs: typing.Any
) -> int:
"""get decimals of an erc20"""
return await erc20_generic.async_erc20_eth_call(
function_name='decimals', token=token, block=block, **rpc_kwargs
) | 665c9e697caffd9470c4f71769c8d215ce7d14a0 | 11,698 |
def get_game_log(game_id: int):
"""
Method used to get list of important events of macau game with given game id.
:param game_id: integer value of existing game
:return: list with string with all important events in game
"""
if game_id >= len(games_container):
return JSONResponse(content={'status': 'No game', 'output': None}, status_code=404)
outputs = games_container[game_id]['outputs']['game']
return {"status": "OK", "output": outputs} | 837b43b24f747fabb819fe5eeb3e284694fd02a3 | 11,699 |
import time
from functools import wraps
def time_this_function(func):
"""
Time the function.
use as a decorator.
Examples
---------
::
@time_this_function
def func(x):
return x
a= func(1)
Parameters
----------
func: Callable
function
Returns
-------
result
function results
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(func.__name__, "time", end - start)
return result
return wrapper | 6ee2d12dc2301e1c3efe2ca02548297aa83d316f | 11,701 |
import numpy as np
import matplotlib.pyplot as plt
def power_plot(data, sfreq, toffset, log_scale, zscale, title):
"""Plot the computed power of the iq data."""
print("power")
t_axis = np.arange(0, len(data)) / sfreq + toffset
if log_scale:
lrxpwr = 10 * np.log10(data + 1e-12)
else:
lrxpwr = data
zscale_low, zscale_high = zscale
if zscale_low == 0 and zscale_high == 0:
if log_scale:
zscale_low = np.min(lrxpwr[np.where(lrxpwr.real != -np.Inf)])
zscale_high = np.max(lrxpwr) + 3.0
else:
zscale_low = np.min(lrxpwr)
zscale_high = np.max(lrxpwr)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(t_axis, lrxpwr.real)
ax.grid(True)
ax.axis([toffset, t_axis[len(t_axis) - 1], zscale_low, zscale_high])
ax.set_xlabel("time (seconds)")
if log_scale:
ax.set_ylabel("power (dB)")
else:
ax.set_ylabel("power")
ax.set_title(title)
return fig | 06d7fab09c027ec2dbf7a11fa78f3b6fcd97e2d7 | 11,702 |
import torchvision
import torchvision.transforms as transforms
import torch
def load_dataset(dataset):
"""
Loads a dataset and returns train, val and test partitions.
"""
dataset_to_class = {
'mnist': torchvision.datasets.MNIST,
'cifar10': torchvision.datasets.CIFAR10,
'fa-mnist': torchvision.datasets.FashionMNIST
}
assert dataset in dataset_to_class.keys()
transform = transforms.Compose([transforms.ToTensor()])
train_dataset = dataset_to_class[dataset](root='./data', train=True, download=True, transform=transform)
train_split, val_split = torch.utils.data.random_split(train_dataset, lengths=[len(train_dataset)-10000, 10000])
test_split = dataset_to_class[dataset](root='./data', train=False, download=True, transform=transform)
return train_split, val_split, test_split | e17dcec84603a742cb6eec0fa18ad40af2454461 | 11,703 |
def compareTo(s1, s2):
"""Compares two strings to check if they are the same length and whether one is longer
than the other"""
move_slice1 = 0
move_slice2 = 1
if s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] == '':
return 0 # return 0 if same length
elif s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] != '':
return len(s2) * -1 # return negative number if s2 > s1
elif s1[move_slice1:move_slice2] != '' and s2[move_slice1:move_slice2] == '':
return len(s1) # return positive number if s1 > s2
else:
move_slice1 += 1 # with each new call, the next object in the string is checked if empty or not
move_slice2 += 1
return compareTo(s1[1:], s2[1:]) | 4700360d10561227a6d4995c66953993dce1cea3 | 11,704 |
def is_unique(x):
"""Check that the given list x has no duplicates
Args:
x (list): elements to be compared
Returns:
boolean: tells if there are only unique values or not
"""
# A set cannot contain any duplicates, so we just check that the length of the list equals the length of the corresponding set
return len(x) == len(set(x)) | 12b4513a71fc1b423366de3f48dd9e21db79e73a | 11,705 |
def str2format(fmt, ignore_types=None):
"""Convert a string to a list of formats."""
ignore_types = ignore_types if ignore_types else ()
token_to_format = {
"s": "",
"S": "",
"d": "g",
"f": "f",
"e": "e",
}
base_fmt = "{{:{}}}"
out = []
for i, token in enumerate(fmt.split(",")):
n = token[:-1]
if i in ignore_types:
out.append(base_fmt.format(n.split(".")[0]))
elif token[-1].lower() == "s":
out.append(base_fmt.format("{}.{}".format(n, n)))
else:
out.append(base_fmt.format(">{}{}".format(n, token_to_format[token[-1]])))
return out | 9cbe719abe6b37a0adcd52af250dfe768f850ffa | 11,706 |
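For example, each comma-separated token becomes one format spec:
print(str2format("8s,6d,10.3e"))
# ['{:8.8}', '{:>6g}', '{:>10.3e}']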
from pathlib import Path
def _read_concordance(filename: Path, Sample_IDs: pd.Index) -> pd.DataFrame:
"""Create a flag of known replicates that show low concordance.
Given a set of samples that are known to be from the same Subject. Flag
samples that show low concordance with one or more replicates.
Returns:
pd.Series:
- Sample_ID (pd.Index)
- is_discordant_replicate (bool): True if replicates show
a concordance below the supplied threshold. Otherwise False.
"""
df = sample_concordance.read(filename)
return (
df.melt(
id_vars=["is_discordant_replicate"],
value_vars=["Sample_ID1", "Sample_ID2"],
var_name="To_Drop",
value_name="Sample_ID",
)
.drop("To_Drop", axis=1)
.groupby("Sample_ID")
.max() # Flag a sample as True if it is True for any comparison.
.astype("boolean")
.reindex(Sample_IDs)
) | 2c7049ff5b521927ffbf863471fc23e073bce531 | 11,707 |
def load_image(name):
""" Get and cache an enaml Image for the given icon name.
"""
path = icon_path(name)
global _IMAGE_CACHE
if path not in _IMAGE_CACHE:
with open(path, 'rb') as f:
data = f.read()
_IMAGE_CACHE[path] = Image(data=data)
return _IMAGE_CACHE[path] | 6ce56c1a9d4d9e80d25a19aca239f43ebd119840 | 11,708 |
def add_shipment_comment(
tracking_id: str,
body: CreateComment = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Add a Comment to a Shipment.
Requires: **VBR_WRITE_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
shipment = client.get_shipment_by_tracking_id(tracking_id)
data_event = client.create_and_link(comment=body.comment, link_target=shipment)[0]
return Comment(comment=data_event.comment, timestamp=data_event.event_ts) | d115d230bbf47a8f1cf625f0ab66e855f382244c | 11,709 |
def _mp2_energy(output_str):
""" Reads the MP2 energy from the output file string.
Returns the energy in Hartrees.
:param output_str: string of the program's output file
:type output_str: str
:rtype: float
"""
ene = ar.energy.read(
output_str,
app.one_of_these([
app.escape('Total MP2 energy'),
app.escape('MP2 energy')
]))
return ene | febd9f4c5759cb6150ff16bda2e9050199c48c5f | 11,711 |
def fastlcs(a,b,Dmax=None):
"""
return the length of the longest common subsequence, or 0 if the maximum number of differences Dmax cannot be respected
Implementation: see the excellent paper "An O(ND) Difference Algorithm and Its Variations" by EUGENE W. MYERS, 1986
NOTE:
let D be the minimal number of insertions or deletions that transform A into B
let L be the length of a longest common subsequence
we always have D = M + N - 2 * L
"""
N, M = len(a), len(b)
if N+M == 0: return 0 #very special case...
if Dmax == None:
Dmax = N + M #worse case
else:
Dmax = min(Dmax, M+N) #a larger value does not make sense!
assert Dmax >= 0, "SOFTWARE ERROR: Dmax must be a non-negative integer"
sesLength = None
W = [0] * (Dmax * 2 + 2) #for i in -Dmax..Dmax, V[i] == W[i+Dmax)
for D in range(0, Dmax+1):
for k in range(-D, +D+1, 2):
if k == -D or (k != D and W[k-1+Dmax] < W[k+1+Dmax]): #k == -D or (k != D and V[k-1] < V[k+1])
x = W[k+1+Dmax] #x = V[k+1]
else:
x = W[k-1+Dmax]+1 #x = V[k-1]+1
y = x - k
while x < N and y < M and a[x] == b[y]: #follow any snake
x += 1
y += 1
W[k+Dmax] = x # V[k] = x # farthest-reaching point with D edits
if x >= N and y >= M:
sesLength = D
L = (M+N-D) // 2  # integer division: L is always a whole number here
assert D == M+N-L-L, ("INTERNAL SOFTWARE ERROR", M,N,D)
return L
return 0 | d8a88c7ffaae892e48a292b7a045da9f5dc58173 | 11,712 |
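With the classic example from Myers' paper:
print(fastlcs("ABCABBA", "CBABAC"))          # 4  (e.g. "CABA")
print(fastlcs("ABCABBA", "CBABAC", Dmax=3))  # 0  (needs 5 edits, more than Dmax)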
from typing import Optional
from typing import Mapping
from typing import Text
def FeaturesExtractor( # pylint: disable=invalid-name
eval_config: config_pb2.EvalConfig,
tensor_representations: Optional[Mapping[
Text, schema_pb2.TensorRepresentation]] = None) -> extractor.Extractor:
"""Creates an extractor for extracting features.
The extractor acts as follows depending on the existence of certain keys
within the incoming extracts:
1) Extracts contains tfma.ARROW_RECORD_BATCH_KEY
The features stored in the RecordBatch will be extracted and added to the
output extract under the key tfma.FEATURES_KEY and the raw serialized inputs
will be added under the tfma.INPUT_KEY. Any extracts that already exist will
be merged with the values from the RecordBatch with the RecordBatch values
taking precedence when duplicate keys are detected. The
tfma.ARROW_RECORD_BATCH_KEY key will be removed from the output extracts.
2) Extracts contains tfma.FEATURES_KEY (but not tfma.ARROW_RECORD_BATCH_KEY)
The operation will be a no-op and the incoming extracts will be passed as is
to the output.
3) Extracts contains neither tfma.FEATURES_KEY | tfma.ARROW_RECORD_BATCH_KEY
An exception will be raised.
Args:
eval_config: Eval config.
tensor_representations: Optional tensor representations to use when parsing
the data. If tensor_representations are not passed or a representation is
not found for a given feature name a default representation will be used
where possible, otherwise an exception will be raised.
Returns:
Extractor for extracting features.
"""
del eval_config
# pylint: disable=no-value-for-parameter
return extractor.Extractor(
stage_name=_FEATURES_EXTRACTOR_STAGE_NAME,
ptransform=_ExtractFeatures(tensor_representations or {})) | 86e58783fca3ebb23de1e6b7ac9cdd4030e99c38 | 11,713 |
def assert__(engine, obj, condition, message=u'Assertion failed'):
""":yaql:assert
Evaluates condition against object. If it evaluates to true returns the
object, otherwise throws an exception with provided message.
:signature: obj.assert(condition, message => "Assertion failed")
:arg obj: object to evaluate condition on
:argType obj: any
:arg condition: lambda function to be evaluated on obj. If the result of the
function evaluates to false, then throws an exception with message
:argType condition: lambda
:arg message: message to throw if condition returns false
:argType message: string
:returnType: obj type or message
.. code::
yaql> 12.assert($ < 2)
Execution exception: Assertion failed
yaql> 12.assert($ < 20)
12
yaql> [].assert($, "Failed assertion")
Execution exception: Failed assertion
"""
if utils.is_iterator(obj):
obj = utils.memorize(obj, engine)
if not condition(obj):
raise AssertionError(message)
return obj | c29e073bf6673ce0c89ed339c27f2287d6952991 | 11,714 |
from typing import Iterable
from typing import List
def build_level_codes(incoming_column_name: str, levels: Iterable) -> List[str]:
"""
Pick level names for a set of levels.
:param incoming_column_name:
:param levels:
:return:
"""
levels = [str(lev) for lev in levels]
levels = [incoming_column_name + "_lev_" + clean_string(lev) for lev in levels]
if len(set(levels)) != len(levels):
levels = [levels[i] + "_" + str(i) for i in range(len(levels))]
return levels | 994ccc0673bd27dcce30709a97372c29d75a8e67 | 11,716 |
def get_all(isamAppliance, check_mode=False, force=False):
"""
Get all rsyslog objects
"""
return isamAppliance.invoke_get("Get all rsyslog objects",
"/core/rsp_rsyslog_objs") | 55ff144577a9ef25b555ca3a37db65bfdb0f0af4 | 11,717 |
def genomic_dup1_37_loc():
"""Create test fixture GRCh37 duplication subject"""
return {
"_id": "ga4gh:VSL.CXcLL6RUPkro3dLXN0miGEzlzPYiqw2q",
"sequence_id": "ga4gh:SQ.VNBualIltAyi2AI_uXcKU7M9XUOuA7MS",
"interval": {
"type": "SequenceInterval",
"start": {"value": 49568693, "type": "Number"},
"end": {"value": 49568695, "type": "Number"},
},
"type": "SequenceLocation",
} | 470af80795c649bc0f4dd29393d1093c45c9f0da | 11,718 |
def parse(f, _bytes):
"""
Parse function will take a parser combinator and parse some set of bytes
"""
if type(_bytes) == Parser:
return f(_bytes)
else:
s = Parser(_bytes, 0)
return f(s) | 7f824c46477a384ce97f66806813f4b42412d6d8 | 11,719 |
def spiral_tm(wg_width=0.5, length=2):
""" sample of component cutback """
c = spiral_inner_io_euler(wg_width=wg_width, length=length, dx=10, dy=10, N=5)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.c.grating_coupler_elliptical_tm,
bend_factory=pp.c.bend_circular,
)
return cc | e7fcec9e61984d8f89558d7479cc942db389ba3a | 11,720 |
from typing import List
from typing import Tuple
def _chunk(fst: pynini.Fst) -> List[Tuple[str, str]]:
"""Chunks a string transducer into tuples.
This function is given a string transducer of the form:
il1 il2 il3 il4 il5 il6
ol1 eps eps ol2 eps ol3
And returns the list:
[(il1 il2 il3, ol1), (il4 il5, ol2), (il6, ol3)]
It thus recovers the "many-to-one" alignment.
Args:
fst: a string transducer containing the alignment.
Returns:
A list of string, char tuples.
"""
# Input epsilon-normalization and removal forces a sensible alignment.
fst = pynini.epsnormalize(fst).rmepsilon()
assert (
fst.properties(pynini.STRING, True) == pynini.STRING
), "FST is not a string automaton"
alignment: List[Tuple[str, str]] = []
state = 0
arc = fst.arcs(state).value()
assert arc.ilabel, f"Input label leaving state {state} contains epsilon"
ilabels = bytearray([arc.ilabel])
assert arc.olabel, f"Output label leaving state {state} contains epsilon"
olabel = arc.olabel
for state in range(1, fst.num_states() - 1):
arc = fst.arcs(state).value()
assert (
arc.ilabel
), f"Input label leaving state {state} contains epsilon"
# A non-epsilon olabel signals a new chunk.
if arc.olabel:
alignment.append((ilabels.decode("utf8"), chr(olabel)))
ilabels.clear()
olabel = arc.olabel
ilabels.append(arc.ilabel)
assert (
ilabels
), f"Input label leaving penultimate state {state} contains epsilon"
alignment.append((ilabels.decode("utf8"), chr(olabel)))
return alignment | fa50e13062267e8929df5f538ab9a924822bc265 | 11,722 |
def auth_token_required(func):
"""Your auth here"""
return func | e65b94d40c914c57ff8d894409b664cf97aa790d | 11,723 |
from decimal import Decimal
def base_convert_money(amount, currency_from, currency_to):
"""
Convert 'amount' from 'currency_from' to 'currency_to'
"""
source = get_rate_source()
# Get rate for currency_from.
if source.base_currency != currency_from:
rate_from = get_rate(currency_from)
else:
# If currency from is the same as base currency its rate is 1.
rate_from = Decimal(1)
# Get rate for currency_to.
rate_to = get_rate(currency_to)
if isinstance(amount, float):
amount = Decimal(amount).quantize(Decimal('.000001'))
# After finishing the operation, quantize down final amount to two points.
return ((amount / rate_from) * rate_to).quantize(Decimal("1.00")) | 5417ba7a9d757bafc835df8f55a1d4e6de72cb2f | 11,724 |
from datetime import date
async def contestant() -> dict:
"""Create a mock contestant object."""
return {
"id": "290e70d5-0933-4af0-bb53-1d705ba7eb95",
"first_name": "Cont E.",
"last_name": "Stant",
"birth_date": date(1970, 1, 1).isoformat(),
"gender": "M",
"ageclass": "G 12 år",
"region": "Oslo Skikrets",
"club": "Lyn Ski",
"team": "Team Kollen",
"email": "[email protected]",
"event_id": "ref_to_event",
"bib": 1,
} | 261fd560107489b58c645efb1bb9c19a396e0dce | 11,726 |
import requests
import http
def GetApitoolsTransport(timeout='unset',
enable_resource_quota=True,
response_encoding=None,
ca_certs=None,
allow_account_impersonation=True,
use_google_auth=None,
response_handler=None,
redact_request_body_reason=None):
"""Get a transport client for use with apitools.
Args:
timeout: double, The timeout in seconds to pass to httplib2. This is the
socket level timeout. If timeout is None, timeout is infinite. If
default argument 'unset' is given, a sensible default is selected.
enable_resource_quota: bool, By default, we are going to tell APIs to use
the quota of the project being operated on. For some APIs we want to use
gcloud's quota, so you can explicitly disable that behavior by passing
False here.
response_encoding: str, the encoding to use to decode the response.
ca_certs: str, absolute filename of a ca_certs file that overrides the
default
allow_account_impersonation: bool, True to allow use of impersonated service
account credentials for calls made with this client. If False, the
active user credentials will always be used.
use_google_auth: bool, True if the calling command indicates to use
google-auth library for authentication. If False, authentication will
fallback to using the oauth2client library.
response_handler: requests.ResponseHandler, handler that gets executed
before any other response handling.
redact_request_body_reason: str, the reason why the request body must be
redacted if --log-http is used. If None, the body is not redacted.
Returns:
An httplib2.Http-like object backed by httplib2 or requests.
"""
if base.UseRequests():
if response_handler:
if not isinstance(response_handler, core_requests.ResponseHandler):
raise ValueError('response_handler should be of type ResponseHandler.')
if (properties.VALUES.core.log_http.GetBool() and
properties.VALUES.core.log_http_streaming_body.GetBool()):
# We want to print the actual body instead of printing the placeholder.
# To achieve this, we need to set streaming_response_body as False.
# Not that the body will be empty if the response_handler has already
# consumed the stream.
streaming_response_body = False
else:
streaming_response_body = response_handler.use_stream
else:
streaming_response_body = False
session = requests.GetSession(
timeout=timeout,
enable_resource_quota=enable_resource_quota,
ca_certs=ca_certs,
allow_account_impersonation=allow_account_impersonation,
streaming_response_body=streaming_response_body,
redact_request_body_reason=redact_request_body_reason)
return core_requests.GetApitoolsRequests(session, response_handler,
response_encoding)
return http.Http(timeout=timeout,
enable_resource_quota=enable_resource_quota,
response_encoding=response_encoding,
ca_certs=ca_certs,
allow_account_impersonation=allow_account_impersonation,
use_google_auth=use_google_auth) | 21fc8d521703580a51c753811b7f0d401f68bba5 | 11,727 |
def user_requested_anomaly7():
""" Checks if the user requested an anomaly, and returns True/False accordingly. """
digit = 0
res = False
if is_nonzero_file7(summon_filename):
lines = []
with open(get_full_path(summon_filename)) as f:
lines = f.readlines()
if len(lines) > 0:
try:
digit = int(lines[0])
if digit > 0:
res = True
except Exception as e:
res = False
append_logs("ERROR:" + str(e), name4logs, "always")
else:
res = False
else:
res = False
# Disable summoning of anomalies after the requested number of anomalies were added
if res:
with open(get_full_path(summon_filename), "w") as f:
if digit > 0:
f.write(str(digit - 1))
else:
f.write("0")
return res | bed54831c00deb6c11ce81c731fe37f35ef070b7 | 11,728 |
from random import choices
def mat_to_r(_line, _mat_object : MatlabObject, _r_object : RObject = RObject()):
"""Move variables from Matlab to R
Parameters
----------
_line : str, Iterable[str]
If str, one of the following:
1. '#! m[at[lab]] -> <vars>'
2. '<vars>'
where <vars> is a comma separated list of Matlab variable names
If Iterable[str]: [<var1>, <var2>, ...]
where <varX> is the name of a Matlab variable
All variables must be str, int, float.
    _mat_object : MatlabObject
The Matlab environment where the variables are stored
_r_object : optional[RObject]
The R environment to load the variables into
Default: new RObject()
Returns
-------
MatlabObject
A Matlab environment with the given variables loaded
Raises
------
RuntimeError:
If _mat_object or _r_object is not alive
ValueError
If _line is not the right format
NameError
If a requested variable is not in the given Matlab environment
"""
## input validation
if not _mat_object.isalive:
# can't do anything
        raise RuntimeError('Matlab connection was killed before variables could be read from it.')
if type(_line) is str and ('#!' in _line or '%!' in _line):
# _line = '#! <lang> -> <vars>'
if not '->' in _line:
raise ValueError('Misformatted line: "' + _line + '"')
_to_load = _line.split('->')[1].replace(' ','').split(',')
elif type(_line) is str:
# _line = '<vars>'
_to_load = _line.replace(' ','').split(',')
elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
# _line = [<var i>, ...]
_to_load = list(_line)
else:
raise ValueError('Unrecognized _line')
if not _r_object.isalive:
# can't do anything
        raise RuntimeError('R connection was killed before things could be sent to it.')
if _to_load[0] == '':
# null case
return _r_object
# check the variables
_who = _mat_object.who
for i in _to_load:
if i not in _who:
raise NameError(str(i) + ' not in Matlab environment')
# bundle them
_random_name = ''.join(choices('abcdefghijklmnopqrstuvwxyz', k=10))
_mat_object.sendline(_random_name + ' = tempname')
_temp_file = _mat_object.before.split('\r\n\r\n')[2].strip()[1:-1]
# get them
_mat_object.sendlines([
'save ' + _temp_file + '.mat ' + ' '.join(_to_load),
'clear ' + _random_name
])
# load them
_r_object.sendlines(
[
'library("R.matlab")',
_random_name + ' <- readMat("' + _temp_file + '.mat")'
] + [
_current + ' <- ' + _random_name + '$' + _current
for _current in _to_load
] + [
'rm(' + _random_name + ')'
]
)
return _r_object | 22f78c06bcf47a71596563debf6115c954d89e21 | 11,729 |
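# A minimal usage sketch for mat_to_r above. MatlabObject/RObject are assumed to be the
# pexpect-backed session wrappers this function expects; the variable name 'x' is illustrative only.
mat = MatlabObject()
mat.sendline("x = 3.14")                 # create a variable on the Matlab side
r = mat_to_r("#! matlab -> x", mat)      # equivalently: mat_to_r(["x"], mat)
r.sendline("print(x)")                   # 'x' is now available in the R session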
def RecalculatedEdgeDegreeAttack(G, remove_fraction = 1.0):
""" Recalculated Edge Degree Attack
"""
n = G.number_of_nodes()
m = int(G.number_of_edges() * (remove_fraction+0.0) )
tot_ND = [0] * (m + 1)
tot_T = [0] * (m + 1)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
tot_ND[0] = ND
tot_T[0] = 0
for i in range(m):
# calculate max edge degree
cur_max_edge_degree = -1
cur_max_u = -1
cur_max_v = -1
for u, v in G.edges():
temp = G.degree(u) * G.degree(v)
if temp > cur_max_edge_degree:
cur_max_edge_degree = temp
cur_max_u = u
cur_max_v = v
# remove edge
G.remove_edge(cur_max_u, cur_max_v)
# calculate and save ND
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
tot_ND[i+1] = ND
tot_T [i+1] = i + 1
return (tot_ND, tot_T) | ff88430f172a1ca319af9d637c292091cab2bf6f | 11,731 |
def get(url, params=None, headers=None):
"""Return the contents from a URL
Params:
- url (str): Target website URL
- params (dict, optional): Param payload to add to the GET request
- headers (dict, optional): Headers to add to the GET request
Example:
```
get('https://httpbin.org/anything', {'soup': 'gazpacho'})
```
"""
opener = build_opener()
if params:
url += "?" + urlencode(params)
    if headers:
        # set every supplied header at once (assigning inside a loop keeps only the last one)
        opener.addheaders = list(headers.items())
    else:
        opener.addheaders = []
    if not (headers and headers.get("User-Agent")):
        UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:69.0) Gecko/20100101 Firefox/69.0"
        opener.addheaders.append(("User-Agent", UA))
with opener.open(url) as f:
content = f.read().decode("utf-8")
return content | edb0fe25728fe1bd11d9e74509a630f2d3823af1 | 11,732 |
def get_param_num(model):
""" get the number of parameters
Args:
model:
Returns:
"""
return sum(p.numel() for p in model.parameters()) | 19d98a1bcbdcb827be4a657f82cda2ff09f119e4 | 11,733 |
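# A quick sanity check for get_param_num, assuming PyTorch is available:
import torch
model = torch.nn.Linear(10, 2)
print(get_param_num(model))   # 22 -> 10*2 weights + 2 biases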
from typing import Union
def format_tensor_to_ndarray(x: Union[ms.Tensor, np.ndarray]) -> np.ndarray:
"""Unify `mindspore.Tensor` and `np.ndarray` to `np.ndarray`. """
if isinstance(x, ms.Tensor):
x = x.asnumpy()
if not isinstance(x, np.ndarray):
raise TypeError('input should be one of [ms.Tensor or np.ndarray],'
' but receive {}'.format(type(x)))
return x | 6e64a40bbafe2b2f89f5afd200077be369adcfe7 | 11,735 |
from typing import Pattern
import re
def hunt_csv(regex: Pattern, body: str) -> list:
"""
    Finds a chunk of csv (located by the given regex) in a larger string,
    splits it, and returns it as a list. Really only useful for single lines;
    a StringIO -> numpy or pandas csv reader is better in other cases.
"""
csv_string = re.search(regex, body)[0]
if r"\n" in csv_string:
lines = csv_string.split(r"\n")
processed_lines = []
for line in lines:
csv_fields = line.split(",")
csv_fields = [field.strip() for field in csv_fields]
processed_lines.append(csv_fields)
return processed_lines
csv_fields = csv_string.split(",")
return [field.strip() for field in csv_fields] | 9c5574f059ef05e6f99e468a9272f42393d79030 | 11,736 |
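# An illustrative call to hunt_csv; the pattern and body below are made up for the example.
import re
body = "header\nDATA: 1, 2, 3\nfooter"
print(hunt_csv(re.compile(r"(?<=DATA: ).*"), body))   # ['1', '2', '3']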
import re
def time_key(file_name):
""" provides a time-based sorting key """
splits = file_name.split('/')
[date] = re.findall(r'(\d{4}_\d{2}_\d{2})', splits[-2])
date_id = [int(token) for token in date.split('_')]
recording_id = natural_key(splits[-1])
session_id = session_key(splits[-2])
return date_id + session_id + recording_id | 07a5448b7b39b00780f53080b316981198d54c91 | 11,737 |
import cv2
def sizeRange(contourList, low, high):
"""Only keeps contours that are in range for size"""
newList = []
for i in contourList:
if (low <= cv2.contourArea(i) <= high):
newList.append(i)
return newList | ac83b09acfd8d8e23a03965b52c5c4cc0361710d | 11,739 |
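# A small check for sizeRange using a synthetic square contour (OpenCV and NumPy assumed installed):
import numpy as np
square = np.array([[[0, 0]], [[0, 20]], [[20, 20]], [[20, 0]]], dtype=np.int32)  # contour area 400
print(len(sizeRange([square], low=100, high=1000)))   # 1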
def number_field_choices(field):
"""
Given a field, returns the number of choices.
"""
try:
return len(field.get_flat_choices())
except AttributeError:
return 0 | b8776e813e9eb7471a480df9d6e49bfeb48a0eb6 | 11,740 |
def _is_an_unambiguous_user_argument(argument: str) -> bool:
"""Check if the provided argument is a user mention, user id, or username (name#discrim)."""
has_id_or_mention = bool(commands.IDConverter()._get_id_match(argument) or RE_USER_MENTION.match(argument))
# Check to see if the author passed a username (a discriminator exists)
argument = argument.removeprefix("@")
has_username = len(argument) > 5 and argument[-5] == "#"
return has_id_or_mention or has_username | adecc093a0597d43292171f867ebcf5a64edc7d8 | 11,741 |
def resize_image(image, size):
"""
Resize the image to fit in the specified size.
:param image: Original image.
:param size: Tuple of (width, height).
:return: Resized image.
:rtype: :py:class: `~PIL.Image.Image`
"""
image.thumbnail(size)
return image | 67db04eac8a92d27ebd3ec46c4946b7662f9c03f | 11,742 |
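# A short usage sketch for resize_image, assuming Pillow is installed:
from PIL import Image
img = Image.new("RGB", (800, 600))
thumb = resize_image(img, (200, 200))
print(thumb.size)   # (200, 150) -- thumbnail() keeps the aspect ratio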
def one_hot_vector(val, lst):
"""Converts a value to a one-hot vector based on options in lst"""
if val not in lst:
val = lst[-1]
    return [x == val for x in lst] | 401ff1d6666c392b3a217659929a4f7832c52522 | 11,743
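# Two example calls showing the one-hot behaviour, including the fallback to the last option
# for values not present in the list:
one_hot_vector("b", ["a", "b", "c"])   # [False, True, False]
one_hot_vector("z", ["a", "b", "c"])   # unseen value maps to the last option: [False, False, True]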
def follow(request, username):
""" Add user with username to current user's following list """
request.user.followers.add(User.objects.get(username=username))
return redirect('accounts:followers') | 72530b32cfcb2282045cd2ef112df62a19e03239 | 11,744 |
def sesteva_stolpce(seznam_seznamov_stolpcev):
"""sešteje vse 'stolpce' v posameznem podseznamu """
matrika_stolpcev = []
for i in range(len(seznam_seznamov_stolpcev)):
sez = seznam_seznamov_stolpcev[i]
stolpec11 = sez[0]
while len(sez) > 1:
i = 0
stolpec22 = sez[1]
stolpec11 = vsota_stolpcev(stolpec11, stolpec22)
sez = sez[i+1:]
matrika_stolpcev.append(stolpec11)
return matrika_stolpcev | fe69368a79b60e549983a07e140ec3b5e532868e | 11,745 |
from flask import jsonify
def create_response(data={}, status=200, message=''):
"""
Wraps response in a consistent format throughout the API
Format inspired by https://medium.com/@shazow/how-i-design-json-api-responses-71900f00f2db
Modifications included:
- make success a boolean since there's only 2 values
- make message a single string since we will only use one message per response
IMPORTANT: data must be a dictionary where:
- the key is the name of the type of data
- the value is the data itself
"""
response = {
'success': 200 <= status < 300,
'code': status,
'message': message,
'result': data
}
return jsonify(response), status | 51346e3a92bdf93085b12eaccc99511b66a34bcf | 11,746 |
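# A hypothetical Flask view using create_response; the app, route, and data are illustrative only.
from flask import Flask
app = Flask(__name__)

@app.route("/users")
def list_users():
    users = [{"id": 1, "name": "Ada"}]
    # responds with {"success": true, "code": 200, "message": "fetched users", "result": {"users": [...]}}
    return create_response({"users": users}, message="fetched users")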
def do_sizes_match(imgs):
"""Returns if sizes match for all images in list."""
return len([*filter(lambda x: x.size != x.size[0], imgs)]) > 0 | 7da30972ecfd4d3cac3d21ff380255865ec3b5c8 | 11,747 |
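# Example usage (Pillow assumed), with the all-images-same-size semantics described in the docstring:
from PIL import Image
a, b = Image.new("RGB", (64, 64)), Image.new("RGB", (64, 64))
print(do_sizes_match([a, b]))                            # True
print(do_sizes_match([a, Image.new("RGB", (32, 32))]))   # False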
import numpy as np
from scipy import stats
def gaussian_sampling(len_x, len_y, num_samples, spread_factor=5, origin_ball=1):
"""
Create a gaussian sampling pattern where each point is sampled from a
bivariate, concatenated normal distribution.
Args:
len_x (int): Size of output mask in x direction (width)
len_y (int): Size of output mask in y direction (height)
num_samples (int): Number of samples to pick
spread_factor (float): Concentration of samples (ie, the SD of the
probability distributions are len/spread_factor)
origin_ball (int): Radius of ball around origin where all samples
are included.
Returns:
np.ndarray: A boolean numpy array (mask) depicting sampling pattern.
"""
# Create two truncated normal distributions for x and y dir
lower = 0
upper_x = len_x
mu_x = len_x // 2
sigma_x = len_x // spread_factor
randgen_x = stats.truncnorm(
(lower - mu_x) / sigma_x,
(upper_x - mu_x) / sigma_x,
loc=mu_x,
scale=sigma_x
)
upper_y = len_y
mu_y = len_y // 2
sigma_y = len_y // spread_factor
randgen_y = stats.truncnorm(
(lower - mu_y) / sigma_y,
(upper_y - mu_y) / sigma_y,
loc=mu_y,
scale=sigma_y
)
# Create mask
    mask = np.zeros([len_y, len_x], dtype=bool)
# Add origin ball
if origin_ball > 0:
y_grid, x_grid = np.ogrid[:len_y, :len_x]
dist_from_center = np.sqrt((y_grid - mu_y) ** 2 + (x_grid - mu_x) ** 2)
mask = dist_from_center <= origin_ball
# Subtract origin ball from number of samples
num_samples -= np.sum(mask)
# Sample points from distribution
xs = randgen_x.rvs(num_samples).astype(np.uint32)
ys = randgen_y.rvs(num_samples).astype(np.uint32)
for i in range(num_samples):
x, y = xs[i], ys[i]
# Ensure unique samples
while mask[y, x]:
x = randgen_x.rvs(1).astype(np.uint32)
y = randgen_y.rvs(1).astype(np.uint32)
xs[i], ys[i] = x, y
mask[y, x] = True
return mask | fce7396b02778aa832c5d24028fb1f55f1013b15 | 11,748 |
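# Example call for gaussian_sampling; the dimensions and sample count are arbitrary.
mask = gaussian_sampling(len_x=128, len_y=128, num_samples=2000, spread_factor=5, origin_ball=8)
print(mask.shape, mask.sum())   # (128, 128) with roughly 2000 True entries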
import json
def from_cx_jsons(graph_json_str: str) -> BELGraph:
"""Read a BEL graph from a CX JSON string."""
return from_cx(json.loads(graph_json_str)) | c61e415199ce0bfc610c1a4277aa8ba1b74a070a | 11,749 |
from typing import Tuple
def _calculate_dimensions(image: Image) -> Tuple[int, int]:
"""
Returns the width and height of the given pixel data.
The height of the image is the number of rows in the list,
while the width of the image is determined by the number of
pixels on the first row. It is assumed that each row contains
the same number of pixels.
:param image: pixel data
:return: width and height as a tuple
"""
try:
width = 0
height = len(image)
if height != 0:
width = len(image[0])
return width, height
except (IndexError, TypeError):
# Either data is not subscribable, or the
# length of the first row cannot be obtained.
raise ValueError("invalid pixel data - could not determine dimensions") | 7e74f181839b70e45cb64ca8b8517ef663c7caf8 | 11,750 |
def cli(ctx, invocation_id):
"""Get a summary of an invocation, stating the number of jobs which succeed, which are paused and which have errored.
Output:
The invocation summary.
For example::
{'states': {'paused': 4, 'error': 2, 'ok': 2},
'model': 'WorkflowInvocation',
'id': 'a799d38679e985db',
'populated_state': 'ok'}
"""
return ctx.gi.invocations.get_invocation_summary(invocation_id) | 94197a9c55c0d37b311585fdfce9d615c6986cb5 | 11,751 |
import numpy as np
def remove_observations_mean(data,data_obs,lats,lons):
"""
Removes observations to calculate model biases
"""
### Import modules
### Remove observational data
databias = data - data_obs[np.newaxis,np.newaxis,:,:,:]
return databias | 8f0cf60137660878f57dc35caa8c23896944d6ab | 11,752 |
import logging
def joint_extraction_model_fn(features, labels, mode, params):
"""Runs the node-level sequence labeling model."""
logging.info("joint_extraction_model_fn")
inputs = features # Arg "features" is the overall inputs.
# Read vocabs and inputs.
dropout = params["dropout"]
if params["circle_features"]:
nnodes, friend_has_label, (words, nwords), (
prev_text_words,
n_prev_text_words), (chars_list, chars_len_list), (partner_words, _), (
friends_words, n_friends_words), (friends_fix, friends_var), (
leaf_type_list, goldmine_feat_list), (_, _), (
node_xpath_list,
node_xpath_len_list), (attributes, attributes_plus_none), (
position_list) = inputs
else:
nnodes, (words, nwords), (prev_text_words, n_prev_text_words), (
chars_list, chars_len_list), (leaf_type_list, goldmine_feat_list), (
_, _), (node_xpath_list,
node_xpath_len_list), (attributes), (position_list) = inputs
# nnodes, the number of nodes in each page;
# shape is [?]; length is the number of pages.
# words, nwords are the node_text feature, shape is [?, ?, ?]
# the first two dimension is the batch * pages,
# the last one is the maximum length of the word lists
# prev_text_words, n_prev_text_words, similar as above for previous nodes'text
# chars_list, chars_len_list, shape is [?,?,?,?] also for node_text features
# the additional dim is for the length of the character sequences.
# friends_words, shape is [?, ?, ?], gathers all the words from different
# friends of one node.
# friends_fix, friends_var, shapes are [?, ?, ?, ?]
# the first two dimension is the batch * pages,
# the last two are the maximum length of friend nodes and words.
nnodes = merge_first_two_dims(nnodes)
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = _index_table_from_file(
params["words"], num_oov_buckets=params["num_oov_buckets"])
with tf.gfile.Open(params["tags"]) as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != "none"]
num_tags = len(indices) + 1 # Make "None" as the tag with the last index.
# NodeText Char Embeddings.
with tf.gfile.Open(params["chars"]) as f:
num_chars = sum(1 for _ in f) + params["num_oov_buckets"]
vocab_chars = _index_table_from_file(
params["chars"], num_oov_buckets=params["num_oov_buckets"])
char_ids = vocab_chars.lookup(chars_list)
variable = tf.get_variable("chars_embeddings",
[num_chars + 1, params["dim_chars"]], tf.float32)
char_embeddings = tf.nn.embedding_lookup(variable, char_ids)
char_embeddings = tf.layers.dropout(
char_embeddings, rate=dropout, training=training)
logging.info("char_embeddings.shape: %s", char_embeddings.shape)
# Char 1d convolution.
weights = tf.sequence_mask(chars_len_list)
char_embeddings = masked_conv1d_and_max(char_embeddings, weights,
params["filters"],
params["kernel_size"])
logging.info("char_embeddings.shape after CNN: %s", char_embeddings.shape)
# Word Embeddings.
word_ids = vocab_words.lookup(words)
glove = np.load(tf.gfile.Open(params["glove"],
"rb"))["embeddings"] # np.array
variable = np.vstack([glove, [[0.] * params["dim_word_embedding"]]])
# To finetune the GloVe embedding by setting trainable as True.
variable = tf.Variable(variable, dtype=tf.float32, trainable=True)
word_embeddings = tf.nn.embedding_lookup(variable, word_ids)
logging.info("word_embeddings.shape: %s", word_embeddings.shape)
# Prev_Text Representations.
prev_text_word_ids = vocab_words.lookup(prev_text_words)
prev_text_word_embeddings = tf.nn.embedding_lookup(variable,
prev_text_word_ids)
if params["use_prev_text_lstm"]:
# PREV_text LSTM.
logging.info("prev_text_representation using lstm")
prev_t = merge_first_two_dims(prev_text_word_embeddings)
# Seq * batch * input
prev_t = tf.transpose(prev_t, perm=[1, 0, 2]) # Need time-major.
prev_output_fw, prev_output_bw = _bidirectional_lstm(
prev_t, params["lstm_size"], merge_first_two_dims(n_prev_text_words))
prev_output = tf.concat([prev_output_fw, prev_output_bw], axis=-1)
prev_output = tf.reduce_mean(prev_output, 0)
prev_output = tf.layers.dropout(
prev_output, rate=dropout, training=training)
logging.info("prev_output.shape (after reduce_mean): %s", prev_output.shape)
context_representation = split_first_two_dims_by_example(
prev_output, prev_text_word_embeddings)
logging.info("context_representation.shape (after split): %s",
context_representation.shape)
else:
logging.info("prev_text_word_embeddings.shape: %s",
prev_text_word_embeddings.shape)
context_representation = tf.reduce_mean(prev_text_word_embeddings, 2)
logging.info("context_representation.shape: %s",
context_representation.shape)
if params["circle_features"]:
partner_embeddings, circle_representation = circle_feature_modeling(
variable, vocab_words, partner_words, friends_words, n_friends_words,
friends_fix, friends_var, word_embeddings, dropout, training, params)
context_representation = circle_representation
if params["use_friend_semantic"]:
friends_ids = vocab_words.lookup(friends_words)
friend_embeddings = tf.nn.embedding_lookup(variable, friends_ids)
if params["use_xpath_lstm"]:
h_output = xpath_feature_modeling(node_xpath_list, node_xpath_len_list,
training, params)
context_representation = tf.concat([h_output, context_representation],
axis=2)
if params["use_position_embedding"]:
position_representation = position_modeling(position_list, params)
context_representation = tf.concat(
[context_representation, position_representation], axis=2)
# Text Embeddings: Concatenate Word and Char and Feature Embeddings.
embeddings = tf.concat([word_embeddings, char_embeddings], axis=-1)
embeddings = tf.layers.dropout(embeddings, rate=dropout, training=training)
logging.info("embeddings.shape: %s", embeddings.shape)
# LSTM inside node texts.
t = merge_first_two_dims(embeddings)
t = tf.transpose(t, perm=[1, 0, 2]) # Need time-major.
output_fw, output_bw = _bidirectional_lstm(t, params["lstm_size"],
merge_first_two_dims(nwords))
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.reduce_mean(output, 0)
output = tf.layers.dropout(output, rate=dropout, training=training)
logging.info("output.shape (after reduce_mean): %s", output.shape)
output = split_first_two_dims_by_example(output, embeddings)
logging.info("output.shape (after split): %s", output.shape)
node_seq_input = tf.concat([output, context_representation], axis=2)
logging.info("output.shape (after + prev): %s", node_seq_input.shape)
# Leaf Type Features.
if params["add_leaf_types"]:
with tf.gfile.Open(params["leaf_types"]) as f:
num_leaf_types = sum(1 for _ in f) + params["num_oov_buckets"]
vocab_leaf_types = _index_table_from_file(
params["leaf_types"], num_oov_buckets=params["num_oov_buckets"])
leaf_type_ids = vocab_leaf_types.lookup(leaf_type_list)
leaf_variable = tf.get_variable(
"leaf_type_embeddings", [num_leaf_types + 1, params["dim_leaf_type"]],
tf.float32)
leaf_type_embeddings = tf.nn.embedding_lookup(leaf_variable, leaf_type_ids)
leaf_type_embeddings = tf.layers.dropout(
leaf_type_embeddings, rate=dropout, training=training)
logging.info("leaf_type_embeddings.shape: %s", char_embeddings.shape)
logging.info("node_seq_input.shape before leaf: %s", node_seq_input.shape)
node_seq_input = tf.concat([node_seq_input, leaf_type_embeddings], axis=2)
logging.info("node_seq_input.shape after leaf: %s", node_seq_input.shape)
# Goldmine Feat Embeddings.
if params["add_goldmine"]:
vocab_goldmine_features = _index_table_from_file(
params["goldmine_features"], num_oov_buckets=1)
goldmine_feature_variable = tf.get_variable("goldmine_feature_embeddings",
[8 + 1, params["dim_goldmine"]],
tf.float32)
goldmine_feat_ids = vocab_goldmine_features.lookup(goldmine_feat_list)
goldmine_feat_embeddings = tf.nn.embedding_lookup(goldmine_feature_variable,
goldmine_feat_ids)
goldmine_feat_embeddings = tf.reduce_sum(goldmine_feat_embeddings, 2)
logging.info("goldmine_feat_embeddings.shape: %s",
goldmine_feat_embeddings.shape)
node_seq_input = tf.concat([node_seq_input, goldmine_feat_embeddings],
axis=2)
logging.info("node_seq_input.shape after goldmine: %s",
node_seq_input.shape)
# Node-level LSTM modeling.
if params["node_encoder"] == "lstm":
# Node-Sequence-LSTM.
n_t = tf.transpose(node_seq_input, perm=[1, 0, 2]) # Need time-major.
node_output_fw, node_output_bw = _bidirectional_lstm(
n_t, params["node_lstm_size"], nnodes)
node_seq_output = tf.concat([node_output_fw, node_output_bw], axis=-1)
node_seq_output = tf.transpose(node_seq_output, perm=[1, 0, 2])
elif params["node_encoder"] == "cnn":
node_weights = tf.sequence_mask(nnodes)
node_seq_output = masked_conv1d_and_max(
node_seq_input,
node_weights,
params["node_filters"],
params["node_kernel_size"],
reducemax=False)
elif params["node_encoder"] == "transformer":
# Node-Sequence-Transformer.
node_seq_output = transformer_encoding(node_seq_input, nnodes, params, mode)
else:
node_seq_output = node_seq_input
logging.info("node_seq_input.shape after encoder: %s", node_seq_output.shape)
if params["node_encoder"] != "transformer":
# Add the dropout layer if the encoder is not a transformer.
node_seq_output = tf.layers.dropout(
node_seq_output, rate=dropout, training=training)
if params["use_friends_discrete_feature"] and params["circle_features"]:
friend_has_label = tf.expand_dims(friend_has_label, axis=-1)
node_seq_output = tf.concat([node_seq_output, friend_has_label], axis=-1)
logging.info("node_seq_input.shape after friend_has_label: %s",
node_seq_output.shape)
node_seq_output = tf.layers.dense(node_seq_output,
params["last_hidden_layer_size"])
logits = tf.layers.dense(node_seq_output, num_tags, name="label_dense_1")
if params["semantic_encoder"] and params["circle_features"]:
partner_similarity_emb = semantic_similarity(variable, vocab_words,
partner_embeddings, attributes,
params)
node_seq_output = tf.concat(
[node_seq_output,
tf.nn.softmax(partner_similarity_emb)], axis=-1)
logging.info("node_seq_output.shape after semantic encoder: %s",
node_seq_output.shape)
if params["use_friend_semantic"]:
friends_similarity_emb = semantic_similarity(variable, vocab_words,
friend_embeddings,
attributes, params)
node_seq_output = tf.concat([node_seq_output, friends_similarity_emb],
axis=-1)
if params["objective"] == "classification":
node_seq_output = tf.layers.dense(
node_seq_output, params["dim_word_embedding"], activation="relu")
node_seq_output = tf.layers.dense(node_seq_output,
params["last_hidden_layer_size"])
logging.info("node_seq_output.shape after semantic encoder: %s",
node_seq_output.shape)
logits = tf.layers.dense(node_seq_output, num_tags, name="label_dense_2")
elif params["objective"] == "semantic_scorer":
logits = semantic_scorer(attributes_plus_none, node_seq_output, params)
elif params["objective"] == "binary_scorer":
logits = binary_scorer(attributes_plus_none, node_seq_output, training,
params)
if params["use_crf"]:
# CRF Layer.
logging.info("logits.shape: %s", logits.shape)
crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)
pred_ids, _ = tfa.text.crf.crf_decode(logits, crf_params, nnodes)
logging.info("pred_ids.shape: %s", pred_ids.shape)
else:
pred_ids = tf.argmax(logits, 2)
logging.info("pred_ids.shape: %s", pred_ids.shape)
# Predict for new sentences in target set.
if mode == tf.estimator.ModeKeys.PREDICT:
reverse_vocab_tags = _index_table_from_file(params["tags"], 1)
pred_strings = reverse_vocab_tags.lookup(tf.strings.as_string(pred_ids))
predictions = {
"pred_ids": pred_ids,
"tags": pred_strings,
"scores": tf.nn.softmax(logits),
"raw_scores": logits,
}
# Store the intermediate weights.
if params["semantic_encoder"]:
predictions["similarity"] = partner_similarity_emb
if params["friend_encoder"]:
predictions["friends_embs"] = circle_representation
if params["extract_node_emb"]:
predictions["node_embs"] = node_seq_output
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
vocab_tags = _index_table_from_file(params["tags"], 1)
tags = vocab_tags.lookup(labels)
logging.info("tags.shape: %s", logits.shape)
logging.info(
"Parameter size: %s",
np.sum(
[np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
if params["use_crf"]:
log_likelihood, _ = tfa.text.crf.crf_log_likelihood(logits, tags, nnodes,
crf_params)
loss = tf.reduce_mean(-log_likelihood)
else:
loss = tf.losses.sparse_softmax_cross_entropy(labels=tags, logits=logits)
# Processing the metrics.
weights = tf.sequence_mask(nnodes)
metrics = {
"acc":
tf.metrics.accuracy(tags, pred_ids, weights),
"precision":
seq_tagging_metric_util.precision(tags, pred_ids, num_tags, indices,
weights),
"recall":
seq_tagging_metric_util.recall(tags, pred_ids, num_tags, indices,
weights),
"f1":
seq_tagging_metric_util.f1(tags, pred_ids, num_tags, indices,
weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.name_scope("train_scope"):
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(
loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=metrics) | 1e6eb2028c8924733329bc4fc3079ca12af12d94 | 11,753 |
import numpy as np
def make_ss_matrices(sigma_x, dt):
"""
To make Q full-rank for inversion (so the mle makes sense), use:
Q = [ dt**2 dt/2
dt/2 1 ]
to approximate Q = (dt 1)(dt 1)'
System:
x = [p_x p_y v_x v_y]
y = [p_x' p_y']
    :param sigma_x: observation (measurement) noise standard deviation
    :param dt: time step between observations
:return:
sigma_0: starting value for sigma_v, with process variance (sigma_v^2 Q)
"""
i2 = np.eye(2)
_ = np.zeros((2, 2))
A = np.block([
[i2, dt*i2],
[_, i2],
])
Q = np.block([
[dt**2 * i2, dt*i2 * .5],
[dt*i2 * .5, i2],
])
C = np.block([i2, _])
R = sigma_x**2 * i2
sigma_0 = float(sigma_x) / 2
return A, Q, C, R, sigma_0 | 551a1d46ee67360e159ab966c0b3f30dd77254c8 | 11,755 |
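# A shape check for make_ss_matrices with illustrative values:
A, Q, C, R, sigma_0 = make_ss_matrices(sigma_x=0.5, dt=0.1)
print(A.shape, Q.shape, C.shape, R.shape, sigma_0)   # (4, 4) (4, 4) (2, 4) (2, 2) 0.25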
def get_icp_val(tmr):
"""Read input capture value"""
return peek(tmr + ICRx) | (peek(tmr + ICRx + 1) << 8) | 0aef45e0c6edeb3c6540a51ae44013ded03c7be7 | 11,756 |
import torch
def validate(segmenter, val_loader, epoch, num_classes=-1):
"""Validate segmenter
Args:
segmenter (nn.Module) : segmentation network
val_loader (DataLoader) : training data iterator
epoch (int) : current epoch
num_classes (int) : number of classes to consider
Returns:
Mean IoU (float)
"""
val_loader.dataset.set_stage("val")
segmenter.eval()
cm = np.zeros((num_classes, num_classes), dtype=int)
with torch.no_grad():
for i, sample in enumerate(val_loader):
input = sample["image"]
target = sample["mask"]
input_var = torch.autograd.Variable(input).float().cuda()
# Compute output
output = segmenter(input_var)
output = (
cv2.resize(
output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
target.size()[1:][::-1],
interpolation=cv2.INTER_CUBIC,
)
.argmax(axis=2)
.astype(np.uint8)
)
# Compute IoU
gt = target[0].data.cpu().numpy().astype(np.uint8)
gt_idx = (
gt < num_classes
) # Ignore every class index larger than the number of classes
cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
if i % args.print_every == 0:
logger.info(
" Val epoch: {} [{}/{}]\t"
"Mean IoU: {:.3f}".format(
epoch, i, len(val_loader), compute_iu(cm).mean()
)
)
ious = compute_iu(cm)
logger.info(" IoUs: {}".format(ious))
miou = np.mean(ious)
logger.info(" Val epoch: {}\tMean IoU: {:.3f}".format(epoch, miou))
return miou | 716f1eda3a283c2707fa8ffd6e8073c351bda560 | 11,757 |
def detect_forward(CoreStateMachine, PostConditionStateMachine):
"""A 'forward ambiguity' denotes a case where the post condition
implementation fails. This happens if an iteration in the core pattern is a
       valid path in the post-condition pattern. In this case no decision can be
made about where to reset the input position.
Example: x+/x At the end of the post condition an incoming
'x' guides through a path in the post condition
and the core pattern. It cannot be determined
by a flag where the input position ends.
       NOTE: For many cases where there is a forward ambiguity quex can generate an
inverse post-condition that goes backwards from the end of the post
condition (see function 'mount()'). However, there are cases where even
this is not possible (see function 'detect_backward()').
"""
## print_callstack()
__assert_state_machines(CoreStateMachine, PostConditionStateMachine)
core_acceptance_state_list = CoreStateMachine.get_acceptance_state_list()
pcsm_init_state = PostConditionStateMachine.get_init_state()
for csm_state in core_acceptance_state_list:
if __dive_to_detect_iteration(CoreStateMachine, csm_state,
PostConditionStateMachine, pcsm_init_state):
return True
return False | 4d6c1952a201f3505b0770f12f42609847728a54 | 11,758 |
def price_sensitivity(results):
"""
Calculate the price sensitivity of a strategy
results
results dataframe or any dataframe with the columns
open, high, low, close, profit
returns
the percentage of returns sensitive to open price
Note
-----
Price sensitivity is calculated by
1) Calculating the profit in cases where open=high and open=low
2) Dividing these profits by the total profits
A high percentage indicates that most of your orders may not get
    executed at the LIMIT price since the stock tends to have a sharp
movement when open=low or open=high. A value of 1 indicates that
all returns are sensitive to prices
This is somewhat a rough measure and it doesn't take into account
whether you BUY or SELL
"""
profit = results["profit"].sum()
sen1 = results.query("open==low")["profit"].sum()
sen2 = results.query("open==high")["profit"].sum()
return (sen1 + sen2) / profit | 02ab811bf689e760e011db6d091dcb7c3079f0d1 | 11,759 |
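# A tiny worked example for price_sensitivity; the numbers are made up.
import pandas as pd
trades = pd.DataFrame({
    "open":   [100, 102, 105],
    "high":   [101, 102, 107],
    "low":    [100, 101, 104],
    "close":  [100.5, 101.5, 106],
    "profit": [10, 20, 30],
})
# open == low on row 0 (profit 10), open == high on row 1 (profit 20) -> (10 + 20) / 60
print(price_sensitivity(trades))   # 0.5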
def wrap_zone(tz, key=KEY_SENTINEL, _cache={}):
"""Wrap an existing time zone object in a shim class.
This is likely to be useful if you would like to work internally with
non-``pytz`` zones, but you expose an interface to callers relying on
``pytz``'s interface. It may also be useful for passing non-``pytz`` zones
to libraries expecting to use ``pytz``'s interface.
:param tz:
A :pep:`495`-compatible time zone, such as those provided by
:mod:`dateutil.tz` or :mod:`zoneinfo`.
:param key:
The value for the IANA time zone key. This is optional for ``zoneinfo``
zones, but required for ``dateutil.tz`` zones.
:return:
A shim time zone.
"""
if key is KEY_SENTINEL:
key = getattr(tz, "key", KEY_SENTINEL)
if key is KEY_SENTINEL:
raise TypeError(
"The `key` argument is required when wrapping zones that do not "
+ "have a `key` attribute."
)
instance = _cache.get((id(tz), key), None)
if instance is None:
instance = _cache.setdefault((id(tz), key), _PytzShimTimezone(tz, key))
return instance | 7776153859b30ee758b16498b1122d0af294d371 | 11,760 |
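# Usage sketches for wrap_zone mirroring the docstring; only the documented parameters are used here.
from zoneinfo import ZoneInfo
shim = wrap_zone(ZoneInfo("America/New_York"))   # key is taken from the zone's .key attribute
# dateutil zones carry no key, so it must be supplied explicitly:
# shim = wrap_zone(dateutil.tz.gettz("America/New_York"), key="America/New_York")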
async def async_browse_media(
hass, media_content_type, media_content_id, *, can_play_artist=True
):
"""Browse Spotify media."""
info = list(hass.data[DOMAIN].values())[0]
return await async_browse_media_internal(
hass,
info[DATA_SPOTIFY_CLIENT],
info[DATA_SPOTIFY_ME],
media_content_type,
media_content_id,
can_play_artist=can_play_artist,
) | d3f912ecbd8949a637d461a453a4b9a9ea73a20c | 11,761 |
from typing import Tuple
import json
def bounds(url: str) -> Tuple[str, str, str]:
"""Handle bounds requests."""
info = main.bounds(url)
return ("OK", "application/json", json.dumps(info)) | 2da1ec2db8b2c0c3a3d28854dc2f68b71aa96bf1 | 11,762 |
import email
def make_message_id():
"""
Generates rfc message id. The returned message id includes the angle
brackets.
"""
return email.utils.make_msgid('sndlatr') | 7030efe1d61f4e54d833bb5c808f582689c626c6 | 11,763 |
import numpy as np
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith('_nerr'):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith('_perr'):
raise ValueError("Missing positive error")
return serr, terr | 2fab9346a3ea8fa6e84e406856eef8ad14ad9f66 | 11,764 |
import numpy as np
import pandas as pd
def unpivot(frame):
"""
Example:
>>> df
date variable value
0 2000-01-03 A 0.895557
1 2000-01-04 A 0.779718
2 2000-01-05 A 0.738892
3 2000-01-03 B -1.513487
4 2000-01-04 B -0.543134
5 2000-01-05 B 0.902733
6 2000-01-03 C -0.053496
7 2000-01-04 C 0.298079
8 2000-01-05 C -1.962022
9 2000-01-03 D -0.174269
10 2000-01-04 D -0.047428
11 2000-01-05 D -1.871996
>>> tm.makeTimeDataFrame(3)
A B C D
2000-01-03 -0.911447 0.274853 -0.740769 2.330942
2000-01-04 -0.208471 -1.024612 0.512266 -0.708707
2000-01-05 -1.368389 -3.464163 -1.940530 -1.149835
"""
N, K = frame.shape
data = {
"value": frame.to_numpy().ravel("F"),
"variable": np.asarray(frame.columns).repeat(N),
"date": np.tile(np.asarray(frame.index), K),
}
return pd.DataFrame(data, columns=["date", "variable", "value"]) | 6cda1c29e7e7c9b4176e83b6a0e1d907458721b2 | 11,765 |
import type
def find_viable_generators_aux (target_type, prop_set):
""" Returns generators which can be used to construct target of specified type
with specified properties. Uses the following algorithm:
        - iterates over requested target_type and all its bases (in the order returned by
          type.all_bases).
        - for each type, find all generators that generate that type and whose requirements
          are satisfied by properties.
- if the set of generators is not empty, returns that set.
Note: this algorithm explicitly ignores generators for base classes if there's
at least one generator for requested target_type.
"""
# Select generators that can create the required target type.
viable_generators = []
initial_generators = []
# Try all-type generators first. Assume they have
# quite specific requirements.
all_bases = type.all_bases(target_type)
for t in all_bases:
initial_generators = __type_to_generators.get(t, [])
if initial_generators:
dout("there are generators for this type")
if t != target_type:
# We're here, when no generators for target-type are found,
# but there are some generators for a base type.
# We'll try to use them, but they will produce targets of
# base type, not of 'target-type'. So, we clone the generators
# and modify the list of target types.
generators2 = []
for g in initial_generators[:]:
# generators.register adds generator to the list of generators
# for toolsets, which is a bit strange, but should work.
# That list is only used when inheriting toolset, which
                    # should have been done before generators are run.
ng = g.clone_and_change_target_type(t, target_type)
generators2.append(ng)
register(ng)
initial_generators = generators2
break
for g in initial_generators:
dout("trying generator " + g.id()
+ "(" + str(g.source_types()) + "->" + str(g.target_types()) + ")")
m = g.match_rank(prop_set)
if m:
dout(" is viable")
viable_generators.append(g)
return viable_generators | 40764a16b17b54c28495e08623d616d0927451d1 | 11,766 |