content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def detect_version(conn):
"""
Detect the version of the database. This is typically done by reading the
contents of the ``configuration`` table, but before that was added we can
guess a couple of versions based on what tables exist (or don't). Returns
``None`` if the database appears uninitialized, and raises
    :exc:`RuntimeError` if the version is so ancient we can't do anything with
it.
"""
try:
with conn.begin():
db_version = conn.scalar(text(
"SELECT version FROM configuration"))
except exc.ProgrammingError:
with conn.begin():
packages_exists = bool(conn.scalar(text(
"SELECT 1 FROM pg_catalog.pg_tables "
"WHERE schemaname = 'public' AND tablename = 'packages'")))
with conn.begin():
statistics_exists = bool(conn.scalar(text(
"SELECT 1 FROM pg_catalog.pg_views "
"WHERE schemaname = 'public' AND viewname = 'statistics'")))
with conn.begin():
files_exists = bool(conn.scalar(text(
"SELECT 1 FROM pg_catalog.pg_tables "
"WHERE schemaname = 'public' AND tablename = 'files'")))
if not packages_exists:
# Database is uninitialized
return None
elif not files_exists:
# Database is too ancient to upgrade
raise RuntimeError("Database version older than 0.4; cannot upgrade")
elif not statistics_exists:
return "0.4"
else:
return "0.5"
else:
return db_version | 6429dbb1e1767cf6fd93c3fd240ce095f1b50ef7 | 6,979 |
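A minimal usage sketch for the function above, assuming a SQLAlchemy engine pointed at the target PostgreSQL database (the connection URL below is a placeholder):
from sqlalchemy import create_engine

engine = create_engine("postgresql:///example_db")  # placeholder URL
with engine.connect() as conn:
    version = detect_version(conn)
    if version is None:
        print("database is uninitialized")
    else:
        print("database schema version:", version)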
def nIonDotBHmodel2(z):
"""Ionization model 2 from BH2007: constant above z=6.
"""
return ((z < 6) * nIonDotLowz(z) +
(z >= 6) * nIonDotLowz(6)) | 438cdd69a229e445f8e313145e84ed11618ee2cb | 6,980 |
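A toy sketch of the same vectorized piecewise pattern used above: the boolean masks (z < 6) and (z >= 6) select which branch contributes at each redshift, so the expression works on scalars and arrays alike. nIonDotLowz is not defined in this snippet, so a hypothetical stand-in is used purely for illustration.
import numpy as np

def nIonDotLowz_stub(z):
    # hypothetical low-z ionization model, for illustration only
    return 1.0 / (1.0 + np.asarray(z, dtype=float))

def nIonDot_piecewise(z):
    z = np.asarray(z, dtype=float)
    return (z < 6) * nIonDotLowz_stub(z) + (z >= 6) * nIonDotLowz_stub(6)

print(nIonDot_piecewise(np.array([2.0, 6.0, 10.0])))  # constant value for z >= 6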
def answer(input):
"""
>>> answer("1234")
1234
"""
lines = input.split('\n')
for line in lines:
return int(line) | b9ce42d88a09976444563493a01741475dce67c5 | 6,981 |
def get_leading_states(contributions):
"""
Return state contributions, names as lists in descending order of contribution amount
:param contributions:
:return:
"""
contributions['state'] = contributions['clean_fips'].apply(get_state)
states = contributions.groupby('state')
state_sums = states.sum()
    ordered_sums = state_sums.sort_values('clean_contribution', ascending=False)['clean_contribution']
names = list(ordered_sums.index)
values = list(ordered_sums)
unwanted = ['NO_STATE_NAME', 'american samoa',
'northern mariana islands', 'guam', 'virgin islands', 'puerto rico']
state_contributions = []
state_names = []
for i in range(0, len(values)):
amount = values[i]
name = names[i]
if name not in unwanted:
state_contributions.append(amount)
state_names.append(name)
return state_contributions, state_names | 7028f87ad7b106e267104dddebc2fe42546d3cfd | 6,982 |
def contacts_per_person_normal_self_20():
"""
Real Name: b'contacts per person normal self 20'
Original Eqn: b'30'
Units: b'contact/Day'
Limits: (None, None)
Type: constant
b''
"""
return 30 | 4a240066b2aefd8af2e19f174632e1bf854bf7d3 | 6,983 |
def __compute_partition_gradient(data, fit_intercept=True):
"""
Compute hetero regression gradient for:
gradient = ∑d*x, where d is fore_gradient which differ from different algorithm
Parameters
----------
data: DTable, include fore_gradient and features
fit_intercept: bool, if model has interception or not. Default True
Returns
----------
numpy.ndarray
hetero regression model gradient
"""
feature = []
fore_gradient = []
for key, value in data:
feature.append(value[0])
fore_gradient.append(value[1])
feature = np.array(feature)
fore_gradient = np.array(fore_gradient)
gradient = []
if feature.shape[0] <= 0:
return 0
for j in range(feature.shape[1]):
feature_col = feature[:, j]
gradient_j = fate_operator.dot(feature_col, fore_gradient)
gradient.append(gradient_j)
if fit_intercept:
bias_grad = np.sum(fore_gradient)
gradient.append(bias_grad)
return np.array(gradient) | e987fc53b1f1ee8cc7a0ddbe83de23b1623b532e | 6,984 |
def calc_nsd(x, n=21):
"""
Estimate Noise Standard Deviation of Data.
Parameters
----------
x : 1d-ndarray
Input data.
n : int
Size of segment.
Returns
-------
result : float
Value of noise standard deviation.
"""
x_diff = np.diff(x, n=2)
x_frag = np.array_split(x_diff, len(x_diff) // n)
cursor = np.argmin([np.std(i, ddof=1) for i in x_frag])
for i in range(n * (cursor + 1), len(x_diff)):
i_frag = x_diff[i-n:i-1]
i_frag_avg = np.mean(i_frag)
i_frag_std = np.std(i_frag, ddof=1)
if np.abs(x_diff[i] - i_frag_avg) > 3 * i_frag_std:
x_diff[i] = i_frag_avg
for i in range(0, n * cursor - 1)[::-1]:
if n * cursor - 1 < 0:
break
i_frag = x_diff[i+1:i+n]
i_frag_avg = np.mean(i_frag)
i_frag_std = np.std(i_frag, ddof=1)
if np.abs(x_diff[i] - i_frag_avg) > 3 * i_frag_std:
x_diff[i] = i_frag_avg
return np.std(x_diff, ddof=1) / 6 ** 0.5 | 23b0041fc1a9bde364828a0a94b12fc7292a391a | 6,985 |
def deflection_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size):
"""
deflection angles on the convergence grid with adaptive FFT
the computation is performed as a convolution of the Green's function with the convergence map using FFT
The grid is returned in the lower resolution grid
:param kappa_high_res: convergence values for each pixel (2-d array)
:param grid_spacing: pixel size of high resolution grid
:param low_res_factor: lower resolution factor of larger scale kernel.
:param high_res_kernel_size: int, size of high resolution kernel in units of degraded pixels
:return: numerical deflection angles in x- and y- direction
"""
kappa_low_res = image_util.re_size(kappa_high_res, factor=low_res_factor)
num_pix = len(kappa_high_res) * 2
if num_pix % 2 == 0:
num_pix += 1
#if high_res_kernel_size % low_res_factor != 0:
# assert ValueError('fine grid kernel size needs to be a multiplicative factor of low_res_factor! Settings used: '
# 'fine_grid_kernel_size=%s, low_res_factor=%s' % (high_res_kernel_size, low_res_factor))
kernel_x, kernel_y = deflection_kernel(num_pix, grid_spacing)
grid_spacing_low_res = grid_spacing * low_res_factor
kernel_low_res_x, kernel_high_res_x = kernel_util.split_kernel(kernel_x, high_res_kernel_size, low_res_factor,
normalized=False)
f_x_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_x, mode='same') / np.pi * grid_spacing ** 2
f_x_high_res = image_util.re_size(f_x_high_res, low_res_factor)
f_x_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_x, mode='same') / np.pi * grid_spacing_low_res ** 2
f_x = f_x_high_res + f_x_low_res
kernel_low_res_y, kernel_high_res_y = kernel_util.split_kernel(kernel_y, high_res_kernel_size, low_res_factor,
normalized=False)
f_y_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_y, mode='same') / np.pi * grid_spacing ** 2
f_y_high_res = image_util.re_size(f_y_high_res, low_res_factor)
f_y_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_y, mode='same') / np.pi * grid_spacing_low_res ** 2
f_y = f_y_high_res + f_y_low_res
return f_x, f_y | cc71b9bd35c5e09e45815cf578870c481a03b8ed | 6,986 |
from params import pop_sizes
def remove_sus_from_Reff(strain, data_date):
"""
This removes the inferred susceptibility depletion from the Reff estimates out of EpyReff.
The inferred Reff = S(t) * Reff_1 where S(t) is the effect of susceptible depletion (i.e. a
factor between 0 and 1) and Reff_1 is the Reff without the effect of a reducing susceptibility
pool.
"""
data_date = pd.to_datetime(data_date)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
# read in assumed CA
CA = pd.read_csv(
"results/"
+ "CA_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in cases by infection dates
cases = pd.read_csv(
"results/"
+ "cases_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date_inferred"]
)
# scale the local cases by the assumed CA
cases["local_scaled"] = cases["local"]
cases.loc[cases.date_inferred <= pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.75
cases.loc[cases.date_inferred > pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.50
# read in the inferred susceptibility depletion factor and convert to a simple array
samples = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = samples["phi"][:2000]
sus_dep_factor.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "sampled_susceptible_depletion_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = sus_dep_factor.to_numpy()
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
# init a dataframe to hold the Reff samples without susceptible depletion
df_Reff_adjusted = pd.DataFrame()
df_cases_adjusted = pd.DataFrame()
for state in states:
# filter cases by the state and after March 2020
cases_state = cases.loc[cases.STATE == state]
dates_complete = pd.DataFrame(
pd.date_range(
start=df_Reff.INFECTION_DATES.min(),
end=max(df_Reff.INFECTION_DATES)
),
columns=["date_inferred"],
)
# merging on date_inferred forces missing dates to be added into cases_state
cases_state = dates_complete.merge(right=cases_state, how='left', on='date_inferred')
cases_state.fillna(0, inplace=True)
cases_state.loc[cases_state.date_inferred <= "2021-06-25", "local_scaled"] = 0
cases_state["cum_local_scaled"] = cases_state["local_scaled"].cumsum()
df_cases_adjusted = pd.concat((df_cases_adjusted, cases_state), axis=0)
cases_state = cases_state.cum_local_scaled.to_numpy()
cases_state = np.tile(cases_state, (2000, 1)).T
# invert the susceptible depletion factor for the model
scaling_factor = 1 / (1 - sus_dep_factor * cases_state / pop_sizes[state])
df_Reff_state = df_Reff.loc[df_Reff.STATE == state]
df_Reff_state.iloc[:, :-2] = df_Reff_state.iloc[:, :-2] * scaling_factor
df_Reff_adjusted = pd.concat((df_Reff_adjusted, df_Reff_state), axis=0)
# save the unscaled Reff
df_Reff_adjusted.to_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
df_cases_adjusted.to_csv(
"results/EpyReff/cases_adjusted_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
return None | 9342896ff84507ecbe93b96a81b781ed6f8c336e | 6,987 |
def word2bytes(word, big_endian=False):
""" Converts a 32-bit word into a list of 4 byte values.
"""
return unpack_bytes(pack_word(word, big_endian)) | 9c208efc87bb830692771f3dacb1618a1d8d7da4 | 6,988 |
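Equivalent behaviour sketched with the standard struct module, assuming pack_word/unpack_bytes perform plain 32-bit (un)packing; those helpers are not shown in the snippet, so this is an illustration rather than the project's own implementation.
import struct

def word2bytes_struct(word, big_endian=False):
    # pack the 32-bit word, then expose it as a list of 4 byte values
    fmt = '>I' if big_endian else '<I'
    return list(struct.pack(fmt, word & 0xFFFFFFFF))

print(word2bytes_struct(0x12345678, big_endian=True))   # [18, 52, 86, 120]
print(word2bytes_struct(0x12345678, big_endian=False))  # [120, 86, 52, 18]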
def statfcn(status, _id, _ret):
"""
Callback for libngspice to report simulation status like 'tran 5%'
"""
    logger.warning(status.decode('ascii'))
return 0 | 344210160227ae76470f53eecd43c913b9dec495 | 6,989 |
def decode_eventdata(sensor_type, offset, eventdata, sdr):
"""Decode extra event data from an alert or log
Provide a textual summary of eventdata per descriptions in
Table 42-3 of the specification. This is for sensor specific
offset events only.
:param sensor_type: The sensor type number from the event
:param offset: Sensor specific offset
:param eventdata: The three bytes from the log or alert
"""
if sensor_type == 5 and offset == 4: # link loss, indicates which port
return 'Port {0}'.format(eventdata[1])
elif sensor_type == 8 and offset == 6: # PSU cfg error
errtype = eventdata[2] & 0b1111
return psucfg_errors.get(errtype, 'Unknown')
elif sensor_type == 0xc and offset == 8: # Memory spare
return 'Module {0}'.format(eventdata[2])
elif sensor_type == 0xf:
if offset == 0: # firmware error
return firmware_errors.get(eventdata[1], 'Unknown')
elif offset in (1, 2):
return firmware_progress.get(eventdata[1], 'Unknown')
elif sensor_type == 0x10:
if offset == 0: # Correctable error logging on a specific memory part
return 'Module {0}'.format(eventdata[1])
elif offset == 1:
return 'Reading type {0:02X}h, offset {1:02X}h'.format(
eventdata[1], eventdata[2] & 0b1111)
elif offset == 5:
return '{0}%'.format(eventdata[2])
elif offset == 6:
return 'Processor {0}'.format(eventdata[1])
elif sensor_type == 0x12:
if offset == 3:
action = (eventdata[1] & 0b1111000) >> 4
return auxlog_actions.get(action, 'Unknown')
elif offset == 4:
sysactions = []
if eventdata[1] & 0b1 << 5:
sysactions.append('NMI')
if eventdata[1] & 0b1 << 4:
sysactions.append('OEM action')
if eventdata[1] & 0b1 << 3:
sysactions.append('Power Cycle')
if eventdata[1] & 0b1 << 2:
sysactions.append('Reset')
if eventdata[1] & 0b1 << 1:
sysactions.append('Power Down')
if eventdata[1] & 0b1:
sysactions.append('Alert')
return ','.join(sysactions)
elif offset == 5: # Clock change event, either before or after
if eventdata[1] & 0b10000000:
return 'After'
else:
return 'Before'
elif sensor_type == 0x19 and offset == 0:
        return 'Requested {0} while {1}'.format(eventdata[1], eventdata[2])
elif sensor_type == 0x1d and offset == 7:
return restart_causes.get(eventdata[1], 'Unknown')
elif sensor_type == 0x21:
return '{0} {1}'.format(slot_types.get(eventdata[1], 'Unknown'),
eventdata[2])
elif sensor_type == 0x23:
phase = eventdata[1] & 0b1111
return watchdog_boot_phases.get(phase, 'Unknown')
elif sensor_type == 0x28:
if offset == 4:
return 'Sensor {0}'.format(eventdata[1])
elif offset == 5:
islogical = (eventdata[1] & 0b10000000)
if islogical:
if eventdata[2] in sdr.fru:
return sdr.fru[eventdata[2]].fru_name
else:
return 'FRU {0}'.format(eventdata[2])
elif sensor_type == 0x2a and offset == 3:
return 'User {0}'.format(eventdata[1])
elif sensor_type == 0x2b:
return version_changes.get(eventdata[1], 'Unknown')
elif sensor_type == 0x2c:
cause = (eventdata[1] & 0b11110000) >> 4
cause = fru_states.get(cause, 'Unknown')
oldstate = eventdata[1] & 0b1111
if oldstate != offset:
try:
cause += '(change from {0})'.format(
ipmiconst.sensor_type_offsets[0x2c][oldstate]['desc'])
except KeyError:
pass | 7a90810657edd017b42f7f70a7a0c617435cb14f | 6,990 |
def about_incumbent(branch_df):
"""
number of incumbent updates
incumbent throughput: num_updates / num_nodes
max_improvement, min_improvement, avg_improvement
avg incumbent improvement / first incumbent value
max, min, avg distance between past incumbent updates
distance between last update and last node explored
"""
abs_improvement = pd.Series(abs(branch_df['best_integer'].diff(1)))
bool_updates = pd.Series((abs_improvement != 0))
avg_improvement = abs_improvement.sum() / bool_updates.sum() if bool_updates.sum() != 0 else None
    nnz_idx = branch_df['best_integer'].to_numpy().nonzero()
first_incumbent = branch_df['best_integer'].iloc[nnz_idx[0][0]] if len(nnz_idx[0]) != 0 else None
num_updates = bool_updates.sum() # real number of updates (could be 0)
second = float(num_updates) / branch_df['num_nodes'].iloc[-1] if branch_df['num_nodes'].iloc[-1] != 0 else None
sixth = avg_improvement / first_incumbent if avg_improvement and first_incumbent else None
# add dummy 1 (update) at the end of bool_updates
bool_updates[bool_updates.shape[0]] = 1.
non_zeros = bool_updates.values == 1
zeros = ~non_zeros
zero_counts = np.cumsum(zeros)[non_zeros]
zero_counts[1:] -= zero_counts[:-1].copy() # distance between two successive incumbent updates
zeros_to_last = zero_counts[-1]
zero_counts = zero_counts[:-1] # removes last count (to the end) to compute max, min, avg
try:
zeros_stat = [zero_counts.max(), zero_counts.min(), zero_counts.mean(), zeros_to_last]
except ValueError:
zeros_stat = [None]*4
incumbent_list = [
num_updates,
second,
abs_improvement.max(),
abs_improvement.min(),
abs_improvement.mean(),
sixth
]
incumbent_list.extend(zeros_stat)
if len(incumbent_list) != 10:
print("***len(incumbent_list): {}".format(len(incumbent_list)))
return incumbent_list, len(incumbent_list) | 309dd09a6fcad58064e98c79536ca73256fe3ac2 | 6,992 |
from typing import List
def unique_chars(texts: List[str]) -> List[str]:
"""
Get a list of unique characters from list of text.
Args:
texts: List of sentences
Returns:
A sorted list of unique characters
"""
return sorted(set("".join(texts))) | 02bc9ce28498bd129fdb68c2f797d138ca584490 | 6,993 |
def adaptive_max_pool1d(input, output_size):
"""Apply the 1d adaptive max pooling to input.
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
output_size : Union[int, Sequence[int]]
The target output size.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
See Also
--------
`torch.nn.AdaptiveMaxPool1d(...)`_
"""
args = utils._get_adaptive_pool_args(
input.size()[-1:], utils._single(output_size))
return _pool('MAX', utils._single, input, **args) | 06556ea06ebe282bf24739d56ff016924a730c8b | 6,994 |
def get_return_nb(input_value, output_value):
"""Get return from input and output value."""
if input_value == 0:
if output_value == 0:
return 0.
return np.inf * np.sign(output_value)
return_value = (output_value - input_value) / input_value
if input_value < 0:
return_value *= -1
return return_value | fe9ef59feb7b4e9797a74258ecbf890171f6df59 | 6,995 |
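A worked example of the sign handling above: for a negative input value the raw relative change is flipped so that a move toward zero counts as a gain. With input_value = -100 and output_value = -90, (output - input) / input = 10 / -100 = -0.1, and the final sign flip gives +0.1, i.e. a 10% return.
print(get_return_nb(100.0, 110.0))   # 0.1
print(get_return_nb(-100.0, -90.0))  # 0.1
print(get_return_nb(0.0, 5.0))       # inf (any gain from a zero-cost position)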
def get_rocauc(val,num_iterations):
""" Trains a logistic regression and calculates the roc auc
for classifying products as >=4 stars """
recalls = np.zeros(num_iterations)
precisions = np.zeros(num_iterations)
f1s = np.zeros(num_iterations)
roc_aucs = np.zeros(num_iterations)
factory = lr_wrapper(val,feature_columns=['sim_score_db','sim_score_dm','rating_mean'],y_column='class')
for z in range(num_iterations):
# Slightly annoying thing here that each call to factory uses its own
# train_test_split, so y_test used for recalls will be different than
# y_test used in roc aucs
y_test,y_preds = factory.fit_and_return_preds()
recalls[z] = recall_score(y_test,y_preds)
precisions[z] = precision_score(y_test,y_preds)
f1s[z] = f1_score(y_test,y_preds)
y_test,y_probas = factory.fit_and_return_probas()
roc_aucs[z] = roc_auc_score(y_test, y_probas)
# print(roc_aucs)
return np.mean(recalls),np.mean(precisions),np.mean(f1s),np.mean(roc_aucs) | d2b2ceae240db6c3ce474d74aea1ebd4d1ed9830 | 6,996 |
import torch
def poly_edges_min_length(P, T, distFcn=norm):
"""
Returns the per polygon min edge length
Parameters
----------
P : Tensor
a (N, D,) points set tensor
T : LongTensor
a (M, T,) topology tensor
Returns
-------
Tensor
        the (M, 1,) min edge length tensor
"""
return torch.min(poly_edges_length(P, T, distFcn=distFcn), dim=1, keepdim=True)[0] | efa68aa752d0f3c1efc29a846f06e006bd8bceb9 | 6,999 |
def softmax(x):
"""A softmax implementation."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0) | 3c8e38bf30304733e957cabab35f8fec1c5fba55 | 7,001 |
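A short usage sketch showing why the maximum is subtracted before exponentiating: with large inputs a naive exp() overflows to inf and yields nan, while the shifted version stays finite and produces the same probabilities.
import numpy as np

x = np.array([1000.0, 1001.0, 1002.0])
print(softmax(x))                    # ~[0.090, 0.245, 0.665], sums to 1
print(np.exp(x) / np.exp(x).sum())   # naive version overflows: [nan, nan, nan]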
import logging
def get_cazy_class_fam_genbank_records(args, session, config_dict):
"""GenBank acc query results from the local CAZyme database for CAZyme from specific classes/fams
:param args: cmd-line argument parser
:param session: open SQLite db session
:param config_dict: dict, defines CAZy classes and families to get sequences for
Return CAZy class and CAZy family GenBank accession query results
"""
logger = logging.getLogger(__name__)
if args.update: # retrieve all GenBank accessions
if args.primary:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND\n"
"do not have a sequence in the db OR the sequence has been updated in NCBI"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_prim_gnbk_acc_from_clss_fams(
session,
config_dict,
)
else:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND\n"
"do not have a sequence in the db OR the sequence has been updated in NCBI"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_all_gnbk_acc_from_clss_fams(
session,
config_dict,
)
else: # retrieve GenBank accesions of records that don't have a sequence
if args.primary:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND do not have a sequence in the db"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_prim_gnbk_acc_from_clss_fams_no_seq(
session,
config_dict,
)
else:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND do not have a sequence in the db"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_all_gnbk_acc_from_clss_fams_no_seq(
session,
config_dict,
)
return genbank_query_class, genbank_query_family | 3f2d8f65f811be1de6b839753242e51457f8e03e | 7,002 |
def assign_bias_ID(data, bias_params=None, bias_name='bias_ID', key_name=None, bias_model=None):
"""
Assign a value to each data point that determines which biases are applied to it.
parameters:
data: pointCollection.data instance
bias_parameters: a list of parameters, each unique combination of which defines a different bias
bias_name: a name for the biases
key_name: an optional parameter which will be used as the dataset name, otherwise a key will be built from the parameter values
bias_model: a dict containing entries:
E_bias: a dict of expected bias values for the each biasID, determined from the sigma_corr parameter of the data
bias_ID_dict: a dict giving the parameter values for each bias_ID (or the key_name if provided)
bias_param_dict: a dict giving the mapping from parameter values to bias_ID values
"""
if bias_model is None:
bias_model={'E_bias':dict(), 'bias_param_dict':dict(), 'bias_ID_dict':dict()}
bias_ID=np.zeros(data.size)+-9999
p0=len(bias_model['bias_ID_dict'].keys())
if bias_params is None:
# assign all data the same bias
bias_model['bias_ID_dict'][p0+1]=key_name
bias_ID=p0+1
bias_model['E_bias'][p0+1]=np.nanmedian(data.sigma_corr)
else:
bias_ID=np.zeros(data.size)
temp=np.column_stack([getattr(data, bp) for bp in bias_params])
u_p, i_p=unique_by_rows(temp, return_index=True)
bias_model['bias_param_dict'].update({param:list() for param in bias_params})
bias_model['bias_param_dict'].update({'ID':list()})
for p_num, param_vals in enumerate(u_p):
this_mask=np.ones(data.size, dtype=bool)
param_vals_dict={}
#Identify the data that match the parameter values
for i_param, param in enumerate(bias_params):
this_mask = this_mask & (getattr(data, param)==param_vals[i_param])
param_vals_dict[param]=param_vals[i_param]
#this_name += '%s%3.2f' % (param, param_vals[i_param])
bias_model['bias_param_dict'][param].append(param_vals[i_param])
bias_model['bias_param_dict']['ID'].append(p0+p_num)
this_ind=np.where(this_mask)[0]
bias_ID[this_ind]=p0+p_num
bias_model['bias_ID_dict'][p0+p_num]=param_vals_dict
bias_model['E_bias'][p0+p_num]=np.nanmedian(data.sigma_corr[this_ind])
data.assign({bias_name:bias_ID})
return data, bias_model | 8f2145b5efcd7b892b3f156e1e0c4ff59dac9d43 | 7,003 |
def check(s):
"""
:param s:str. the input of letters
:return: bool.
"""
if len(s) == 7 and len(s.split(' ')) == 4:
for unit in s.split(' '):
if unit.isalpha():
return True | 86e1270af299ba83b68d0dab9f8afc3fc5b7d7c5 | 7,004 |
def pyeval(*args):
"""
.. function:: pyeval(expression)
Evaluates with Python the expression/s given and returns the result
>>> sql("pyeval '1+1'")
pyeval('1+1')
-------------
2
>>> sql("select var('test')") # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator VAR: Variable 'test' does not exist
>>> sql("select var('test', pyeval('1+1'))")
var('test', pyeval('1+1'))
--------------------------
2
>>> sql("select var('test')")
var('test')
-----------
2
>>> sql('''pyeval '1+1' '"-"' '3+1' ''')
pyeval('1+1','"-"','3+1')
-------------------------
2-4
>>> sql("var 'testvar' of select 5")
var('testvar',(select 5))
-------------------------
5
>>> sql("pyeval 'testvar+5'")
pyeval('testvar+5')
-------------------
10
>>> sql('''pyeval keywords('lala') ''')
pyeval('keywords(''lala'')')
----------------------------
lala
"""
if len(args) == 0:
return
r = ''
for i in args:
r = r + str(eval(i, functions.variables.__dict__, functions.rowfuncs.__dict__))
return r | fa7febed8f25860eee497ce670dc9465526cbbc1 | 7,005 |
def with_hyperparameters(uri: Text):
"""Constructs an ImporterNode component that imports a `standard_artifacts.HyperParameters`
artifact to use for future runs.
Args:
uri (Text): Hyperparameter artifact's uri
Returns: ImporterNode
"""
return ImporterNode(
instance_name='with_hyperparameters',
source_uri=uri,
artifact_type=standard_artifacts.HyperParameters) | e06cc33d043e6abd4a9ee30648f72dcea2ad1814 | 7,007 |
def update_user_controller(user_repository_spy): # pylint: disable=W0621
"""montagem de update_user_controller utilizando spy"""
usecase = UpdateUser(user_repository_spy, PasswordHash())
controller = UpdateUserController(usecase)
return controller | 474c2bf42c932d71181bebbf7096cd628ba6956a | 7,008 |
def blockList2Matrix(l):
""" Converts a list of matrices into a corresponding big block-diagonal one. """
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res | b13a67cd203930ca2d88ec3cd6dae367b313ae94 | 7,009 |
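A usage sketch assembling a block-diagonal matrix from two square blocks; the bare zeros((s, s)) call above implies a NumPy-style namespace, so numpy is assumed here. scipy.linalg.block_diag does the same job when SciPy is available.
import numpy as np

a = np.array([[1., 2.], [3., 4.]])
b = np.array([[5.]])
print(blockList2Matrix([a, b]))
# [[1. 2. 0.]
#  [3. 4. 0.]
#  [0. 0. 5.]]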
def log_new_fit(new_fit, log_gplus, mode='residual'):
"""Log the successful refits of a spectrum.
Parameters
----------
new_fit : bool
If 'True', the spectrum was successfully refit.
log_gplus : list
Log of all previous successful refits of the spectrum.
mode : str ('positive_residual_peak', 'negative_residual_peak', 'broad', 'blended')
Specifies the feature that was refit or used for a new successful refit.
Returns
-------
log_gplus : list
Updated log of successful refits of the spectrum.
"""
if not new_fit:
return log_gplus
modes = {'positive_residual_peak': 1, 'negative_residual_peak': 2, 'broad': 3, 'blended': 4}
log_gplus.append(modes[mode])
return log_gplus | 16761ca135efbdb9ee40a42cb8e9e1d62a5dc05e | 7,010 |
def prepare_hr_for_compromised_credentials(hits: list) -> str:
"""
Prepare human readable format for compromised credentials
:param hits: List of compromised credentials
:return: Human readable format of compromised credentials
"""
hr = []
for hit in hits:
source = hit.get('_source', {})
created_date = source.get('breach', {}).get('created_at', {}).get('date-time')
created_date = arg_to_datetime(created_date)
if created_date:
created_date = created_date.strftime(READABLE_DATE_FORMAT) # type: ignore
first_observed_date = source.get('breach', {}).get('first_observed_at', {}).get('date-time')
first_observed_date = arg_to_datetime(first_observed_date)
if first_observed_date:
first_observed_date = first_observed_date.strftime(READABLE_DATE_FORMAT) # type: ignore
data = {
'FPID': source.get('fpid', ''),
'Email': source.get('email', ''),
'Breach Source': source.get('breach', {}).get('source'),
'Breach Source Type': source.get('breach', {}).get('source_type'),
'Password': source.get('password'),
'Created Date (UTC)': created_date,
'First Observed Date (UTC)': first_observed_date
}
hr.append(data)
return tableToMarkdown("Compromised Credential(s)", hr, ['FPID', 'Email', 'Breach Source', 'Breach Source Type',
'Password', 'Created Date (UTC)',
'First Observed Date (UTC)'], removeNull=True) | 846144700d3fe21628306de5aff72a77d2cc9864 | 7,011 |
def red_bg(text):
""" Adds a red background to the given text. """
return colorize(text, "\033[48;5;167m") | edc2741f3246de2c90c9722c4dbd2d813708fe90 | 7,012 |
def model_utils(decoy: Decoy) -> ModelUtils:
"""Get mock ModelUtils."""
return decoy.mock(cls=ModelUtils) | eb5d3eaf8f280086521209f62025e42fca7aec93 | 7,013 |
def getLeftTopOfTile(tilex, tiley):
"""Remember from the comments in the getStartingBoard() function that we have two sets of coordinates in this program. The first set are the pixel coordinates, which on the x-axis ranges from 0 to WINDOWWIDTH - 1, and the y-axis ranges from 0 to WINDOWHEIGHT - 1.
Lembrando que a partir dos comentários na função getStartingBoard() temos dois conjuntos de coordenadas neste programa. O primeiro conjunto são as coordenadas dos pixels, que no intervalo do eixo-x vai de 0 até WINDOWWIDTH - 1 e no intervalo do eixo-y vai de 0 até WINDOWHEIGHT - 1.
The other coordinate system is used to refer to the tiles on the game board. The upper left tile is at 0, 0. The x-axis ranges from 0 to COLS - 1, and the y-axis ranges from 0 to ROWS - 1.
O outro sistema de coordenadas é usado para se referir as peças do jogo no tabuleiro. A peça superior esquerda está em 0,0. O intervalo do eixo-x vai de 0 até COLS -1, e o intervalo do eixo-y vai de 0 até ROWS -1."""
left = XMARGIN + (tilex * TILESIZE) + (tilex - 1)
top = YMARGIN + (tiley * TILESIZE) + (tiley - 1)
return (left, top) | fad5a9df02b05e76ba62013a49d77941b71f6f5f | 7,014 |
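A worked example with hypothetical layout constants (XMARGIN, YMARGIN and TILESIZE are module-level values in the real game and are made up here): tile (2, 1) maps to the pixel coordinates computed below.
XMARGIN, YMARGIN, TILESIZE = 10, 10, 80  # hypothetical constants for illustration
tilex, tiley = 2, 1
left = XMARGIN + (tilex * TILESIZE) + (tilex - 1)  # 10 + 160 + 1 = 171
top = YMARGIN + (tiley * TILESIZE) + (tiley - 1)   # 10 + 80 + 0 = 90
print(left, top)  # 171 90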
def count_str(text, sub, start=None, end=None):
"""
Computes the number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
Optional arguments start and end are interpreted as in slice notation.
:param text: The string to search
:type text: ``str``
:param sub: The substring to count
:type sub: ``str``
:param start: The start of the search range
:type start: ``int``
:param end: The end of the search range
:type end: ``int``
:return: The number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
:rtype: ``int``
"""
assert isinstance(text,str), '%s is not a string' % text
return text.count(sub,start,end) | 1578f868a4f1a193ec9907494e4af613ca2a6d4d | 7,015 |
def tanh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.tanh(x, out)
return Quantity(
np.tanh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
) | 3d86565fb512bfe6f8034dd7436b65c1c322cde6 | 7,016 |
from typing import Optional
from typing import Sequence
from pathlib import Path
def epacems(
states: Optional[Sequence[str]] = None,
years: Optional[Sequence[int]] = None,
columns: Optional[Sequence[str]] = None,
epacems_path: Optional[Path] = None,
) -> dd.DataFrame:
"""Load EPA CEMS data from PUDL with optional subsetting.
Args:
states: subset by state abbreviation. Defaults to None (which gets all states).
years: subset by year. Defaults to None (which gets all years).
columns: subset by column. Defaults to None (which gets all columns).
epacems_path: path to parquet dir. By default it automatically loads the path
from :mod:`pudl.workspace`
Returns:
The requested epacems data
"""
all_states = pudl.constants.WORKING_PARTITIONS['epacems']['states']
if states is None:
states = all_states # all states
else:
nonexistent = [state for state in states if state not in all_states]
if nonexistent:
raise ValueError(
f"These input states are not in our dataset: {nonexistent}")
states = list(states)
all_years = pudl.constants.WORKING_PARTITIONS['epacems']['years']
if years is None:
years = all_years
else:
nonexistent = [year for year in years if year not in all_years]
if nonexistent:
raise ValueError(f"These input years are not in our dataset: {nonexistent}")
years = list(years)
# columns=None is handled by dd.read_parquet; gives all columns
if columns is not None:
# nonexistent columns are handled by dd.read_parquet; raises ValueError
columns = list(columns)
if epacems_path is None:
pudl_settings = pudl.workspace.setup.get_defaults()
epacems_path = Path(pudl_settings["parquet_dir"]) / "epacems"
epacems = dd.read_parquet(
epacems_path,
use_nullable_dtypes=True,
columns=columns,
filters=year_state_filter(
states=states,
years=years,
),
)
return epacems | 79213c5adb0b56a3c96335c0c7e5cb1faa734752 | 7,017 |
from typing import Optional
from typing import Tuple
import logging
def check_termination_criteria(
theta: Optional[float],
num_iterations: Optional[int]
) -> Tuple[float, int]:
"""
Check theta and number of iterations.
:param theta: Theta.
:param num_iterations: Number of iterations.
:return: Normalized values.
"""
# treat theta <= 0 as None, as the caller wants to ignore it.
if theta is not None and theta <= 0:
theta = None
# treat num_iterations <= 0 as None, as the caller wants to ignore it.
if num_iterations is not None and num_iterations <= 0:
num_iterations = None
if theta is None and num_iterations is None:
raise ValueError('Either theta or num_iterations (or both) must be provided.')
logging.info(f'Starting evaluation (theta={theta}, num_iterations={num_iterations}).')
return theta, num_iterations | 536cd70b8e8b04d828f0a4af1db96809ab607ff3 | 7,018 |
def verify_password(password, hash):
"""Verify if a hash was generated by the password specified.
:password: a string object (plaintext).
:hash: a string object.
:returns: True or False.
"""
method = get_hash_algorithm(flask.current_app.config['HASH_ALGORITHM'])
return method.verify(password, hash) | 484ad9f2debbd8856b9b7fbdd2a7588f9a279f62 | 7,020 |
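A minimal sketch of what get_hash_algorithm might resolve to, assuming it maps the HASH_ALGORITHM config value to a passlib hash handler; the real lookup lives elsewhere in the application.
from passlib.hash import pbkdf2_sha256  # assumed handler, for illustration

stored = pbkdf2_sha256.hash("s3cret")
print(pbkdf2_sha256.verify("s3cret", stored))  # True
print(pbkdf2_sha256.verify("wrong", stored))   # False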
import re
def _conversion_sample2v_from_meta(meta_data):
"""
Interpret the meta data to extract an array of conversion factors for each channel
so the output data is in Volts
Conversion factor is: int2volt / channelGain
For Lf/Ap interpret the gain string from metadata
For Nidq, repmat the gains from the trace counts in `snsMnMaXaDw`
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: numpy array with one gain value per channel
"""
def int2volts(md):
""" :return: Conversion scalar to Volts. Needs to be combined with channel gains """
if md.get('typeThis', None) == 'imec':
return md.get('imAiRangeMax') / 512
else:
return md.get('niAiRangeMax') / 32768
int2volt = int2volts(meta_data)
# interprets the gain value from the metadata header:
if 'imroTbl' in meta_data.keys(): # binary from the probes: ap or lf
sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
# imroTbl has 384 entries regardless of no of channels saved, so need to index by n_ch
n_chn = _get_nchannels_from_meta(meta_data) - 1
# the sync traces are not included in the gain values, so are included for broadcast ops
gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
int2volt, sy_gain)),
'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
int2volt, sy_gain))}
elif 'niMNGain' in meta_data.keys(): # binary from nidq
gain = np.r_[
np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt, # no gain for analog sync
np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))] # no unit for digital sync
out = {'nidq': gain}
return out | e8cd2ea376bdb44ee999459ffcc28c4c4db39458 | 7,021 |
def read_split_csv(input_files, delimiter='\t', names=['src', 'dst'],
dtype=['int32', 'int32']):
"""
Read csv for large datasets which cannot be read directly by dask-cudf
read_csv due to memory requirements. This function takes large input
split into smaller files (number of input_files > number of gpus),
reads two or more csv per gpu/worker and concatenates them into a
single dataframe. Additional parameters (delimiter, names and dtype)
can be specified for reading the csv file.
"""
client = default_client()
n_files = len(input_files)
n_gpus = get_n_gpus()
n_files_per_gpu = int(n_files/n_gpus)
worker_map = []
for i, w in enumerate(client.has_what().keys()):
files_per_gpu = input_files[i*n_files_per_gpu: (i+1)*n_files_per_gpu]
worker_map.append((files_per_gpu, w))
new_ddf = [client.submit(_read_csv, part, delimiter, names, dtype,
workers=[worker]) for part, worker in worker_map]
wait(new_ddf)
return new_ddf | cd1f2ccd487cf808af1de6a504bc0f6a3a8e34a1 | 7,022 |
def _gnurl( clientID ):
"""
Helper function to form URL to Gracenote_ API service.
:param str clientID: the Gracenote_ client ID.
:returns: the lower level URL to the Gracenote_ API.
:rtype: str
"""
clientIDprefix = clientID.split('-')[0]
return 'https://c%s.web.cddbp.net/webapi/xml/1.0/' % clientIDprefix | 6d1935c8b634459892e4ec03d129c791b1d8a06a | 7,023 |
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
jsled: stolen from django newforms/util.py...
"""
return u''.join([u' %s="%s"' % (k, escape(v)) for k, v in attrs.items()]) | 01d9ee3ec96b5a096758f60c2defe6c491d94817 | 7,024 |
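A usage example; escape here is assumed to be an HTML-escaping helper such as django.utils.html.escape, matching the Django origin noted in the docstring.
from django.utils.html import escape  # assumed source of the escape() helper

print(flatatt({'id': 'main', 'class': 'big red'}))
# ' id="main" class="big red"'
print(flatatt({}))
# ''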
def draw_bboxes(img,boxes,classes):
"""
Draw bounding boxes on top of an image
Args:
img : Array of image to be modified
boxes: An (N,4) array of boxes to draw, where N is the number of boxes.
classes: An (N,1) array of classes corresponding to each bounding box.
Outputs:
        A PIL Image of the same size as 'img' with bounding boxes
        and classes drawn
"""
source = Image.fromarray(img)
draw = ImageDraw.Draw(source)
w2,h2 = (img.shape[0],img.shape[1])
idx = 0
for i in range(len(boxes)):
xmin,ymin,xmax,ymax = boxes[i]
c = classes[i]
draw.text((xmin+15,ymin+15), str(c))
for j in range(4):
draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline="red")
return source | 6b60550206aaaa9e5033850c293e6c48a7b13e6d | 7,025 |
def markdown(context, template_path):
""" {% markdown 'terms-of-use.md' %} """
return mark_safe(get_markdown(context, template_path)[0]) | ea6cb711c1a669ad7efdf277baab82ea2a65ba9c | 7,026 |
def investorMasterGetSubaccAssetDetails(email, recvWindow=""):
"""# Query managed sub-account asset details(For Investor Master Account)
#### `GET /sapi/v1/managed-subaccount/asset (HMAC SHA256)`
### Weight:
1
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
email |STRING |YES |
recvWindow |LONG |NO |
timestamp |LONG |YES |
"""
endpoint = '/sapi/v1/managed-subaccount/asset'
params = {
"email": email
}
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params) | 7d4f4c5cbd069144319268dcb7235926e55f85d8 | 7,027 |
def ema_indicator(close, n=12, fillna=False):
"""EMA
Exponential Moving Average via Pandas
Args:
close(pandas.Series): dataset 'Close' column.
        n(int): n period for the EMA.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(close, n, fillna)
return pd.Series(ema_, name='ema') | 9ddb20ddc6e0cc4b1f08a4e3347f719dbe84b55b | 7,028 |
def u_glob(U, elements, nodes, resolution_per_element=51):
"""
Compute (x, y) coordinates of a curve y = u(x), where u is a
finite element function: u(x) = sum_i of U_i*phi_i(x).
    Method: Run through each element and compute coordinates
over the element.
"""
x_patches = []
u_patches = []
for e in range(len(elements)):
Omega_e = (nodes[elements[e][0]], nodes[elements[e][-1]])
local_nodes = elements[e]
d = len(local_nodes) - 1
X = np.linspace(-1, 1, resolution_per_element)
x = affine_mapping(X, Omega_e)
x_patches.append(x)
u_element = 0
for r in range(len(local_nodes)):
i = local_nodes[r] # global node number
u_element += U[i]*phi_r(r, X, d)
u_patches.append(u_element)
x = np.concatenate(x_patches)
u = np.concatenate(u_patches)
return x, u | 2c9cabf97b9904d80043a0102c0ac8cd156388ae | 7,030 |
def keyring_rgw_create(**kwargs):
"""
Create rgw bootstrap keyring for cluster.
Args:
**kwargs: Arbitrary keyword arguments.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "rgw"
return keyring_create(**params) | 077a3536a6e1ce2e762b14d8fb046617136fe941 | 7,031 |
import datetime
import matplotlib.dates
import pandas as pd
from dateutil import parser
def read_tm224_data(filename: str, folder: str = None) -> pd.DataFrame:
"""
Read data stored by Lakeshore TM224 temperature monitor software.
Args:
filename: string
name of ".xls" file on disk
folder: string
location of file on disk
Returns:
df : pandas.DataFrame
DataFrame with all .xls columns and converted matplotlib timestamps
"""
if not filename.endswith(".xls"):
filename += ".xls"
# Extract only the timestamp
timestamp = pd.read_excel(folder + filename, skiprows=1, nrows=1, usecols=[1], header=None)[1][0]
# Parse datetime object from timestamp
timestamp_dt = parser.parse(timestamp, tzinfos={"CET": 0 * 3600})
# Create DataFrame
df = pd.read_excel(folder + filename, skiprows=3)
# Add matplotlib datetimes to DataFrame
time_array = []
for milliseconds in df["Time"]:
time_array.append(timestamp_dt + datetime.timedelta(milliseconds=milliseconds))
# noinspection PyUnresolvedReferences
df["MPL_datetimes"] = matplotlib.dates.date2num(time_array)
return df | 430e5a64b5b572b721177c5adce7e222883e4512 | 7,032 |
import json
import logging
def load_keypoints2d_file(file_path, njoints=17):
"""load 2D keypoints from keypoint detection results.
Only one person is extracted from the results. If there are multiple
persons in the prediction results, we select the one with the highest
detection score.
Args:
file_path: the json file path.
njoints: number of joints in the keypoint defination.
Returns:
A `np.array` with the shape of [njoints, 3].
"""
keypoint = array_nan((njoints, 3), dtype=np.float32)
det_score = 0.0
try:
with open(file_path, 'r') as f:
data = json.load(f)
except Exception as e: # pylint: disable=broad-except
logging.warning(e)
return keypoint, det_score
det_scores = np.array(data['detection_scores'])
keypoints = np.array(data['keypoints']).reshape((-1, njoints, 3))
# The detection results may contain zero person or multiple people.
if det_scores.shape[0] == 0:
# There is no person in this image. We set NaN to this frame.
return keypoint, det_score
else:
# There are multiple people (>=1) in this image. We select the one with
# the highest detection score.
idx = np.argmax(det_scores)
keypoint = keypoints[idx]
det_score = det_scores[idx]
return keypoint, det_score | 3cf5c8f2c236b3883e983c74e1ac23c78d256b0d | 7,034 |
def utf8_bytes(string):
""" Convert 'string' to bytes using UTF-8. """
return bytes(string, 'UTF-8') | 8e5423d2b53e8d5fbeb07017ccd328236ef8bea5 | 7,035 |
def _get_value(session_browser, field):
"""Get an input field's value."""
return session_browser.evaluate_script('$("#id_%s").val()' % field) | 7ed2d130b83af7e6fdb6cce99efb44846820585a | 7,037 |
import functools
def standarize_ms(datas, val_index, max=(2**32 - 1)):
    """
    Standardize milliseconds elapsed from an Arduino reading.
    Note: only accounts for a single wrap-around of the Arduino millisecond counter.
datas:
List of data readings
val_index:
Index of ms value in reading data entry
max:
Max time of ms - since the Arduino will output
a circular value from the time it starts.
For correct value, see https://www.arduino.cc/en/Reference/Millis.
"""
def _standarize_value(initial_value, reading):
reading[val_index] = int(reading[val_index]) - initial_value;
if(reading[val_index] <= 0):
reading[val_index] += max
return reading
initial_value = int(datas[0][val_index])
    _standarize_with_initial = functools.partial(_standarize_value, initial_value)
    res = map(_standarize_with_initial, datas)
res = list(res)
res[0][val_index] = 0 | 84bf498ff3c88b3415433fa9d5be7b6865b3216b | 7,042 |
def corr_bias(x_data, y_data, yerr, pdz1_x, pdz1_y, pdz2_x, pdz2_y):
"""
Given a correlation measurement and associated PDZs, generate a model and
fit as a bias to the measurement. Return:
1) the model [unbiased] (x and y float arrays)
2) best fit bias (float)
3) the bias PDF (x and y float arrays)
@params
x_data - The central angles of the correlation measurements
y_data - The values of the correlation measurements
yerr - The errorbars of the correlation measurements
pdz1_x - PDZ 1 redshift range to generate models from
pdz1_y - PDZ 1 probability values to generate models from
pdz2_x - PDZ 2 redshift range to generate models from
pdz2_y - PDZ 2 probability values to generate models from
pdz1_x and pdz2_x, pdz1_y and pdz2_y should be the same for an autocorrelation
@returns
xmod - the angular range associated with the generated model
ymod - the value of the model at each angle
best - The best fit bias value
(i.e. square this and multiply it by the base model for
the best fitting model)
xbias - The range of bias values tested
ybias - The probability associated with each bias value
chisq - The not reduced chi square value associated with the best
fit bias value
"""
xmod, ymod = model(pdz1_x, pdz1_y, pdz2_x, pdz2_y)
xbias, ybias, chisq, best = bias_fit(x_data, y_data, yerr, xmod, ymod)
return xmod, ymod, best, xbias, ybias, chisq | 255e1c5a67551deb19b91d247f5a913541d8f1da | 7,043 |
def confidence_ellipse(
x=None, y=None, cov=None, ax=None, n_std=3.0, facecolor="none", **kwargs
):
"""
Create a plot of the covariance confidence ellipse of `x` and `y`
Parameters
----------
x, y : array_like, shape (n, )
Input data.
cov : array_like, shape (2, 2)
covariance matrix. Mutually exclusive with input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
The number of standard deviations to determine the ellipse's radiuses.
Returns
-------
matplotlib.patches.Ellipse
Other parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
"""
    if x is None and y is None:
        if cov is None:
            raise ValueError("Either x and y or cov must be provided")
    else:
        if x.size != y.size:
            raise ValueError("x and y must be the same size")
        cov = np.cov(x, y)
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse(
(0, 0),
width=ell_radius_x * 2,
height=ell_radius_y * 2,
facecolor=facecolor,
**kwargs
)
    # Calculating the standard deviation of x from
# the squareroot of the variance and multiplying
# with the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = np.mean(x)
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = np.mean(y)
transf = (
transforms.Affine2D()
.rotate_deg(45)
.scale(scale_x, scale_y)
.translate(mean_x, mean_y)
)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse) | 3965012ccdd1f6b71af4f169b812c384446ed76d | 7,044 |
from typing import List
def adapted_fields(type) -> List[Attribute]:
"""Return the attrs format of `fields()` for attrs and dataclasses."""
if is_dataclass(type):
return [
Attribute(
attr.name,
attr.default
if attr.default is not MISSING
else (
Factory(attr.default_factory)
if attr.default_factory is not MISSING
else NOTHING
),
None,
True,
None,
True,
attr.init,
True,
type=attr.type,
)
for attr in dataclass_fields(type)
]
else:
return attrs_fields(type) | cc6a799e06715cbd4e3219ea42aaeff2e4924613 | 7,046 |
from typing import get_args
def get_parms():
"""
Use get_args to get the args, and return a dictionary of the args ready for
use in pump software.
@see get_args()
:return: dict: parms
"""
parms = {}
args = get_args()
for name, val in vars(args).items():
if val is not None:
parms[name] = val
return parms | 6ebdbee656fd216e5d8c66025029aa2d58641831 | 7,047 |
def make_led_sample(n_samples=200, irrelevant=0, random_state=None):
"""Generate random samples from the 7-segment problem.
Parameters
----------
n_samples : int, optional (default=200)
The number of samples to generate.
irrelevant : int, optional (default=0)
The number of irrelevant binary features to add.
Returns
-------
X, y
"""
random_state = check_random_state(random_state)
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
data = data[random_state.randint(0, 10, n_samples)]
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
if irrelevant > 0:
X = np.hstack((X, random_state.rand(n_samples, irrelevant) > 0.5))
return X, y | 7dab2595c0118ca2f08a99ded22047be164f1648 | 7,049 |
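A usage sketch: X holds the 7 boolean segment indicators plus any irrelevant noise columns, and y holds the displayed digit.
import numpy as np

X, y = make_led_sample(n_samples=100, irrelevant=3, random_state=0)
print(X.shape)       # (100, 10) -> 7 segments + 3 irrelevant features
print(np.unique(y))  # a subset of the digits 0-9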
def handle_source_authorization_exception(e):
""" Error handler: the data source requires authorisation
This will be triggered when opening a private HDX dataset before
the user has supplied their authorisation token.
@param e: the exception being handled
"""
if e.message:
flask.flash(e.message)
# we're using flask.g.recipe_id to handle the case where a saved recipe
# points to a formerly-public dataset that has suddenly become private
# normally, it will be None (because there's no saved recipe yet)
recipe = recipes.Recipe(recipe_id=flask.g.recipe_id)
# add an extra parameter for the /data/save form to indicate that we
# want the user to provide an authorisation token
extras = {
'need_token': 'on'
}
# note whether the resource looked like it came from HDX
if e.is_ckan:
extras['is_ckan'] = 'on'
# redirect to the /data/save page to ask the user for a token
return flask.redirect(util.data_url_for('data_save', recipe=recipe, extras=extras), 302) | e2c736b301e229d61874bb3cfad13b86dc93e1d1 | 7,050 |
def findcosmu(re0, rp0, sublat, latc, lon): # considers latc to be planetocentric latitudes, but sublat to be planetographic
    """Takes the equatorial and polar radius of Jupiter (re0, rp0 respectively), the sub-latitude of Jupiter, latitude and
    longitude (both in radians) to determine the "cos(mu)" of the photons. This effectively helps to identify where the limb
    of Jupiter occurs in the Chandra observations"""
    rfactor = (re0/rp0)**2 # ratio of the equatorial radius and polar radius...
lat = np.arctan(np.tan(latc)*rfactor) # and coordinate transformation from planetocentric latitude -> planetographic latitude
ans = (rfactor * (np.cos(lon)*np.cos(sublat)*np.cos(lat)) + (np.sin(sublat)*np.sin(lat))) / np.sqrt(rfactor*np.cos(sublat)**2 \
+ np.sin(lat)**2) / np.sqrt(rfactor * np.cos(lat)**2 + np.sin(lat)**2) # to return the value(s) of cos(mu)
return ans | 677adffb6f00e9e1119a71a660ee81d2893d4ef1 | 7,051 |
def RMS_energy(frames):
"""Computes the RMS energy of frames"""
f = frames.flatten()
return N.sqrt(N.mean(f * f)) | 10d366e771f629c6efda2faf1f752363dca63b0a | 7,052 |
import urllib
def is_blacklisted_url(url):
"""
Return whether the URL blacklisted or not.
Using BLACKLIST_URLS methods against the URLs.
:param url: url string
:return: True if URL is blacklisted, else False
"""
url = urllib.parse.urlparse(url).netloc
for method in WHITELIST_URL:
for whitelist_url in WHITELIST_URL[method]:
if method(url, whitelist_url):
return False
for method in BLACKLIST_URLS:
for blacklist_url in BLACKLIST_URLS[method]:
if method(url, blacklist_url):
return True
return False | 8a987c0bbce01d18da67b047aed0e680ce5fc661 | 7,053 |
def heading(yaw):
"""A helper function to getnerate quaternions from yaws."""
q = euler2quat(0.0, 0.0, yaw)
quat = Quaternion()
quat.w = q[0]
quat.x = q[1]
quat.y = q[2]
quat.z = q[3]
return quat | fcd05575257ef6cdc084cb2fde309aa48b5a2fb5 | 7,054 |
def check_login_required(view_func):
"""
A decorator that checks whether login is required on this installation
and, if so, checks if the user is logged in. If login is required and
the user is not logged in, they're redirected to the login link.
"""
def _check(*args, **kwargs):
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("auth_require_sitewide_login"):
return login_required(view_func)(*args, **kwargs)
else:
return view_func(*args, **kwargs)
return _check | 9f0b44f630a24649d87af0bd604a41b7b5b885de | 7,055 |
def Str(*args):
"""(s1, s2, ...) -> match s1 or s2 or ..."""
if len(args) == 1:
return Str1(args[0])
return Expression.Alt(tuple(map(Str, args))) | 41aece71a6a774db58028add5d60d8c9fed42dd3 | 7,056 |
def image_noise_gaussian(image):
"""
Adds Gaussian noise to the provided image
"""
float_img = image.astype(np.float)
gauss = np.random.normal(0.0, 4.0, (IMG_SIZE, IMG_SIZE, IMG_CHANNELS))
gauss = gauss.reshape(IMG_SIZE, IMG_SIZE, IMG_CHANNELS).astype(np.float)
result = float_img + gauss
result = np.clip(result, 0, 255)
result = result.astype(np.uint8)
return result | 0e5f5a83f7017d48e083a35bcb22cdf50ebb1006 | 7,057 |
import torch as T  # assumed tensor backend; the original "from re import T" cannot supply FloatTensor/LongTensor
def argsort(x: T.FloatTensor, axis: int = None) -> T.LongTensor:
"""
Get the indices of a sorted tensor.
If axis=None this flattens x.
Args:
x: A tensor:
axis: The axis of interest.
Returns:
tensor (of ints): indices of sorted tensor
"""
if axis is None:
return flatten(x).sort()[1]
else:
return x.sort(dim=axis)[1] | 57e2e4d8c5a870c4ea382a02e19d0451dbe90704 | 7,058 |
def dirPickledSize(obj,exclude=[]):
"""For each attribute of obj (excluding those specified and those that start with '__'),
compute the size using getPickledSize(obj) and return as a pandas Series of KBs"""
return pd.Series({o:getPickledSize(getattr(obj, o))/1024. for o in dir(obj) if not np.any([o[:2]=='__', o in exclude, getattr(obj, o) is None])}) | d27b404f8c637aa7dd230126d3dbe9112240112c | 7,059 |
from typing import Any
def audit_log() -> Any:
"""
List all events related to the connected member.
"""
if "member_id" not in session:
abort(404)
return render_template(
"audit_log.html",
full_audit_log=fetch_audit_log(session["member_id"]),
) | a5c95ac9c7e55212f8e308a9bf141468dc3a7626 | 7,060 |
def load_comparisonXL(method, evaluate="train", dropna=True):
"""Load comparison table."""
if evaluate == "test":
e = "['Test']"
elif evaluate == "in bag":
e = "['In Bag']"
elif evaluate == "out of bag":
e = "['Out of Bag']"
else:
e = "['Train']"
# Import methods
table = []
for i in method:
table.append(pd.read_excel(i + ".xlsx"))
# Concatenate table
df = pd.DataFrame()
for i in range(len(table)):
df = pd.concat([df, table[i].loc[table[i]['evaluate'] == e].T.squeeze()], axis=1, sort=False)
df = df.T.drop(columns="evaluate")
# Remove [ ] from string
for i in range(len(df)):
for j in range(len(df.T)):
if type(df.iloc[i, j]) is str:
df.iloc[i, j] = df.iloc[i, j][2: -2]
# Reset index and add methods column
method_name = []
for i in range(len(method)):
name_i = method[i].rsplit('/', 1)[1]
method_name.append(name_i)
df = df.reset_index()
df = pd.concat([pd.Series(method_name, name="method"), df], axis=1, sort=False)
df = df.drop("index", 1)
#df = df.set_index("method")
# drop columns with just nans
if dropna is True:
df = df.dropna(axis=1, how='all')
return df | 56ff4d8c74ec88fc8b2f245706b7cf039334a76f | 7,061 |
def verify_user_password(user: User, password: str) -> bool:
"""Verify User's password with the one that was given on login page."""
return pwd_context.verify(password, user.password) | 43b25118e5ef3b89622acd7aa3276a1b18352674 | 7,062 |
def __valid_ddb_response_q(response):
"""private function to validate a given DynamoDB query response."""
if 'ResponseMetadata' in response:
if 'HTTPStatusCode' in response['ResponseMetadata']:
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return True
return False | f4e71c4f5d058ba20013b3a405ffeff637e03ae8 | 7,063 |
def GetPipelineResultsPathInGCS(artifacts_path):
"""Gets a full Cloud Storage path to a pipeline results YAML file.
Args:
artifacts_path: string, the full Cloud Storage path to the folder containing
pipeline artifacts, e.g. 'gs://my-bucket/artifacts'.
Returns:
A string representing the full Cloud Storage path to the pipeline results
YAML file.
"""
return '{0}/results/results.yaml'.format(artifacts_path) | 83b7c15f00679ff201c9a8b155102f36bb8e685c | 7,064 |
def Pnm_p(n, m, x):
"""Eq:II.77 """
return lpmn(m, n, x)[1][-1, -1] | 027cb169263853ede6d29a6760da981d30ef950b | 7,065 |
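A short usage check: scipy.special.lpmn returns two (m+1, n+1) arrays, the associated Legendre values and their derivatives, so [1][-1, -1] picks the derivative dP_n^m(x)/dx. For n = 1, m = 0, P_1^0(x) = x and its derivative is 1.
from scipy.special import lpmn

print(lpmn(0, 1, 0.5)[1][-1, -1])  # 1.0
print(Pnm_p(1, 0, 0.5))            # same value through the wrapper above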
def _remove_empty_subspace(subspaces, n_clusters, m, P, centers, labels, scatter_matrices):
"""
Check if after rotation and rearranging the dimensionalities a empty subspaces occurs. Empty subspaces will be
removed for the next iteration. Therefore all necessary lists will be updated.
:param subspaces: number of subspaces
:param n_clusters:
:param m: list containing number of dimensionalities for each subspace
:param P: list containing projections for each subspace
:param centers: list containing the cluster centers for each subspace
:param labels: list containing cluster assignments for each subspace
:param scatter_matrices: list containing scatter matrices for each subspace
:return: subspaces, n_clusters, m, P, centers, labels, scatter_matrices
"""
if 0 in m:
np_m = np.array(m)
empty_spaces = np.where(np_m == 0)[0]
print(
"[NrKmeans] ATTENTION:\nSubspaces were lost! Number of lost subspaces:\n" + str(
len(empty_spaces)) + " out of " + str(
len(m)))
subspaces -= len(empty_spaces)
n_clusters = [x for i, x in enumerate(
n_clusters) if i not in empty_spaces]
m = [x for i, x in enumerate(m) if i not in empty_spaces]
P = [x for i, x in enumerate(P) if i not in empty_spaces]
centers = [x for i, x in enumerate(centers) if i not in empty_spaces]
labels = [x for i, x in enumerate(labels) if i not in empty_spaces]
scatter_matrices = [x for i, x in enumerate(
scatter_matrices) if i not in empty_spaces]
return subspaces, n_clusters, m, P, centers, labels, scatter_matrices | 473a509860b9708ee217f4f7b0a2718d1a3a7d7e | 7,066 |
def _get_citekeys_action(elem, doc):
"""
Panflute action to extract citationId from all Citations in the AST.
"""
if not isinstance(elem, pf.Citation):
return None
manuscript_citekeys = global_variables["manuscript_citekeys"]
manuscript_citekeys.append(elem.id)
return None | 74dec7a972f38c34040dc430b0c130b2a76784c2 | 7,067 |
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the
gradient has been averaged across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for each_grad, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(each_grad, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0,values=grads)
grad = tf.reduce_mean(grad, 0)
# The variables are redundant because they are shared
# across towers. So we will just return the first tower's pointer to
# the Variable.
weights = grad_and_vars[0][1]
grad_and_var = (grad, weights)
average_grads.append(grad_and_var)
return average_grads | da85dee074f5bb15a13ea3d2c2fe105469c1ee90 | 7,068 |
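# Hypothetical usage sketch for average_gradients above: averaging the
# gradient of one shared variable computed on two towers (the constant
# tensors stand in for real per-tower gradients).
import tensorflow as tf
shared_var = tf.Variable([0.0, 0.0])
tower_grads = [
    [(tf.constant([1.0, 2.0]), shared_var)],  # gradients from tower 0
    [(tf.constant([3.0, 4.0]), shared_var)],  # gradients from tower 1
]
averaged = average_gradients(tower_grads)
# averaged[0] is ([2.0, 3.0], shared_var): the element-wise mean across towers.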
def compute_neighbours_probability_matrix(n_matrix, src, d_matrix, sigma_neigh):
"""Compute neighbours' probability matrix.
Parameters
-----------
n_matrix : :py:class:`~numpy.ndarray` of :py:class:`~int`, shape (n_verts, n_neigh_max)
The sets of neighbours.
src : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts, 3)
The coordinates of the points in the brain discretization.
d_matrix : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts x n_verts)
The Euclidean distance between the points in the
brain discretization.
sigma_neigh : :py:class:`~float`
The standard deviation of the Gaussian distribution that defines
the neighbours' probability.
Returns
--------
np_matrix : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts, n_neigh_max)
The neighbours' probability.
"""
np_matrix = np.zeros(n_matrix.shape, dtype=float)
for i in range(src.shape[0]):
n_neig = len(np.where(n_matrix[i] > -1)[0])
np_matrix[i, 0:n_neig] = \
np.exp(-d_matrix[i, n_matrix[i, 0:n_neig]] ** 2
/ (2 * sigma_neigh ** 2))
np_matrix[i] = np_matrix[i] / np.sum(np_matrix[i])
return np_matrix | 2651ad650697266d7e0db5fdc55e176334fc3cb8 | 7,069 |
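# Hypothetical toy example for compute_neighbours_probability_matrix above:
# three collinear source points, each with at most two neighbours; -1 pads
# rows with fewer neighbours (data made up for illustration).
import numpy as np
src = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
d_matrix = np.linalg.norm(src[:, None, :] - src[None, :, :], axis=-1)
n_matrix = np.array([[1, -1], [0, 2], [1, -1]])
np_matrix = compute_neighbours_probability_matrix(n_matrix, src, d_matrix,
                                                  sigma_neigh=1.0)
# Each row of np_matrix sums to 1 over that point's valid neighbours.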
def ar_cosmap(inmap):
"""
Get the cosine map and off-limb pixel map using WCS.
    Generate a map of the solar disk that is 1 at disk center and increases radially outward as 1/cos(angle to LOS),
    which equals 2 at 60 degrees from the LOS.
Other outputs:
- rrdeg: gives degrees from disk center
- offlimb: map of 1=on-disk and 0=off-disk
"""
## Take off an extra half percent from the disk to get rid of limb effects
fudge=0.999
#
## Get helioprojective_coordinates
# Below is deprecated so commented out and updated
# xx, yy = wcs.convert_pixel_to_data(inmap.data.shape,
# [inmap.meta["CDELT1"], inmap.meta["CDELT2"]],
# [inmap.meta["CRPIX1"], inmap.meta["CRPIX2"]],
# [inmap.meta["CRVAL1"], inmap.meta["CRVAL2"]])
x, y = (np.meshgrid(*[np.arange(v.value) for v in inmap.dimensions]) * u.pixel)
hpc = inmap.pixel_to_world(x, y)#NEED TO CHECK RE WHAT ORIGIN TO USE, origin=1)
xx = hpc.Tx.value
yy = hpc.Ty.value
rr = ((xx**2.) + (yy**2.))**(0.5)
#
coscor = np.copy(rr)
rrdeg = np.arcsin(coscor / inmap.meta["RSUN_OBS"])
coscor = 1. / np.cos(rrdeg)
wgt = np.where(rr > (inmap.meta["RSUN_OBS"]*fudge))
coscor[wgt] = 1.
#
offlimb = np.copy(rr)
wgtrr = np.where(rr >= (inmap.meta["RSUN_OBS"]*fudge))
offlimb[wgtrr] = 0.
wltrr = np.where(rr < (inmap.meta["RSUN_OBS"]*fudge))
offlimb[wltrr] = 1.
#
return coscor, rrdeg, offlimb | 4365b0ef1134f117e5bc3396239cc1ba174f5009 | 7,070 |
def as_array(request: SubRequest) -> bool:
"""
Boolean fixture to support ExtensionDtype _from_sequence method testing.
"""
b = request.param
assert isinstance(b, bool)
return b | 7a8b627769b8955ad4162a30be5ddc9b0ee76723 | 7,071 |
def gram_matrix(x):
"""Create the gram matrix of x."""
b, c, h, w = x.shape
phi = x.view(b, c, h * w)
return phi.bmm(phi.transpose(1, 2)) / (c * h * w) | 11de97b67f3f8ecb7d7d009de16c1a5d153ab8ff | 7,072 |
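# Hypothetical usage sketch for gram_matrix above: one normalized C x C Gram
# matrix per batch element of a feature map.
import torch
feats = torch.randn(2, 3, 8, 8)   # batch of 2 feature maps with 3 channels
grams = gram_matrix(feats)
assert grams.shape == (2, 3, 3)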
def open_file(path, mode):
"""
Attempts to open file at path.
    Tries up to max_attempts times because of intermittent permission errors on Windows.
    Returns None if every attempt fails.
"""
max_attempts = 100
f = None
for _ in range(max_attempts):
try:
f = open(path, mode)
except PermissionError:
continue
break
return f | 9217a1b66b2bb30895fe445fa4a50b5da5466391 | 7,074 |
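# Hypothetical usage sketch for open_file above (file name is made up);
# the function returns None if every attempt fails.
f = open_file("example_output.txt", "w")
if f is not None:
    f.write("retry-friendly write\n")
    f.close()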
import requests
import json
def get_sts_token(current_refresh_token):
"""
Retrieves an authentication token.
:param current_refresh_token: Refresh token retrieved from a previous authentication, used to retrieve a
subsequent access token. If not provided (i.e. on the initial authentication), the password is used.
"""
url = 'https://{}:{}/{}'.format(opts.authHostname, opts.authPort, auth_path)
if not current_refresh_token: # First time through, send password
data = {'username': opts.user, 'password': opts.password, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,
'scope': scope}
print("Sending authentication request with password to ", url, "...")
else: # Use the given refresh token
data = {'username': opts.user, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token',
'takeExclusiveSignOnControl': True}
print("Sending authentication request with refresh token to ", url, "...")
try:
r = requests.post(url,
headers={'Accept': 'application/json'},
data=data,
auth=(opts.clientid, client_secret),
verify=True)
except requests.exceptions.RequestException as e:
print('RDP-GW authentication exception failure:', e)
return None, None, None
if r.status_code != 200:
print('RDP-GW authentication result failure:', r.status_code, r.reason)
print('Text:', r.text)
if r.status_code in [401,400] and current_refresh_token:
# Refresh token may have expired. Try again using machinedID + password.
return get_sts_token(None)
return None, None, None
auth_json = r.json()
print("RDP-GW Authentication succeeded. RECEIVED:")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in'] | b41c6658a4eb218771d6e908411ba3b54e4e13f3 | 7,075 |
async def device_climate_fan(device_climate_mock):
"""Test thermostat with fan device."""
return await device_climate_mock(CLIMATE_FAN) | 1143adceacb610d18e1a26df4e24f715eb68917f | 7,076 |
def make_training_config(args):
""" Create training config by parsing args from command line and YAML config file, filling the rest with default values.
Args
args : Arguments parsed from command line.
Returns
config : Dictionary containing training configuration.
"""
# Parse the configuration file.
config = {}
if args.config:
config = parse_yaml(args.config)
config = set_defaults(config, default_training_config)
# Additional config; start from this so it can be overwritten by the other command line options.
if args.o:
config = parse_additional_options(config, args.o)
if args.backbone:
config['backbone']['name'] = args.backbone
if args.generator:
config['generator']['name'] = args.generator
# Backbone config.
if args.freeze_backbone:
config['backbone']['details']['freeze'] = args.freeze_backbone
if args.backbone_weights:
config['backbone']['details']['weights'] = args.backbone_weights
# Generator config.
if args.random_transform:
config['generator']['details']['transform_generator'] = 'random'
if args.random_visual_effect:
config['generator']['details']['visual_effect_generator'] = 'random'
if args.batch_size:
config['generator']['details']['batch_size'] = args.batch_size
if args.group_method:
config['generator']['details']['group_method'] = args.group_method
if args.shuffle_groups:
config['generator']['details']['shuffle_groups'] = args.shuffle_groups
if args.image_min_side:
config['generator']['details']['image_min_side'] = args.image_min_side
if args.image_max_side:
config['generator']['details']['image_max_side'] = args.image_max_side
# Train config.
if args.gpu:
config['train']['gpu'] = args.gpu
if args.epochs:
config['train']['epochs'] = args.epochs
if args.steps:
config['train']['steps_per_epoch'] = args.steps
if args.lr:
config['train']['lr'] = args.lr
if args.multiprocessing:
config['train']['use_multiprocessing'] = args.multiprocessing
if args.workers:
config['train']['workers'] = args.workers
if args.max_queue_size:
config['train']['max_queue_size'] = args.max_queue_size
if args.weights:
config['train']['weights'] = args.weights
return config | 1902e0999336249a7feda1f0aa415f7d148a16ee | 7,077 |
from typing import Tuple
def _crown_relu_relaxer(inp: Bound) -> Tuple[LinFun, LinFun]:
"""Obtain the parameters of a linear ReLU relaxation as in CROWN.
This relaxes the ReLU with the adaptive choice of lower bounds as described
for CROWN-ada in https://arxiv.org/abs/1811.00866.
Args:
inp: Input to the ReLU.
Returns:
lb_linfun, ub_linfun: Linear functions bounding the ReLU
"""
inp_lower, inp_upper = inp.lower, inp.upper
relu_on = (inp_lower >= 0.)
relu_amb = jnp.logical_and(inp_lower < 0., inp_upper >= 0.)
ub_slope = relu_on.astype(jnp.float32)
ub_slope += jnp.where(relu_amb,
inp_upper / jnp.maximum(inp_upper - inp_lower, 1e-12),
jnp.zeros_like(inp_lower))
ub_offset = jnp.where(relu_amb, - ub_slope * inp_lower,
jnp.zeros_like(inp_lower))
lb_slope = (ub_slope >= 0.5).astype(jnp.float32)
lb_offset = jnp.zeros_like(inp_lower)
return (eltwise_linfun_from_coeff(lb_slope, lb_offset),
eltwise_linfun_from_coeff(ub_slope, ub_offset)) | 7e43e973adb65089a2eb35665c219911fc409446 | 7,078 |
import torch
import time
def run_single_measurement(model_name, produce_model, run_model, teardown, inp, criterion, extra_params, use_dtr, use_profiling):
"""
This function initializes a model and performs
a single measurement of the model on the given input.
While it might seem most reasonable to initialize
the model outside of the loop, DTR's logs have shown
that certain constants in the model persist between loop iterations;
performing these actions in a separate *function scope* turned out to be the only
way to prevent having those constants hang around.
Returns a dict of measurements
"""
torch.cuda.reset_max_memory_allocated()
# resetting means the count should be reset to
# only what's in scope, meaning only the input
input_mem = torch.cuda.max_memory_allocated()
model = produce_model(extra_params=extra_params)
params = []
for m in model:
if hasattr(m, 'parameters'):
params.extend(m.parameters())
model_mem = torch.cuda.max_memory_allocated()
optimizer = torch.optim.SGD(model[0].parameters(), 1e-3, momentum=0.9, weight_decay=1e-4)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
# start timing
torch.cuda.synchronize()
start_time = time.time()
if use_dtr:
torch.reset_profile()
start.record()
# with torch.autograd.profiler.profile(use_cuda=True) as prof:
run_model(criterion, *model, *inp, optimizer=optimizer)
end.record()
start_sync = time.time()
torch.cuda.synchronize()
end_sync = time.time()
end_time = time.time()
# end timing
if use_dtr:
# operators-only time, tracked by DTR
cuda_time = torch.compute_time()
base_compute_time = -1
remat_compute_time = -1
search_time = -1
cost_time = -1
if use_profiling:
base_compute_time = torch.base_compute_time()
remat_compute_time = torch.remat_compute_time()
search_time = torch.search_time()
cost_time = torch.cost_time()
torch.reset_profile()
total_mem = torch.cuda.max_memory_allocated()
teardown(*model)
torch.cuda.reset_max_memory_allocated()
del model
if use_dtr:
torch.toggle_log(False)
del params
batch_size = len(inp[0])
ips = batch_size / (end_time - start_time)
result = {
'time': end_time - start_time,
'sync_time': end_sync - start_sync,
'gpu_time': start.elapsed_time(end),
'input_mem': input_mem,
'model_mem': model_mem,
'total_mem': total_mem,
'base_compute_time': base_compute_time,
'remat_compute_time': remat_compute_time,
'search_time': search_time,
'cost_time': cost_time,
'batch_size': batch_size,
'ips': ips
}
if use_dtr:
result['cuda_time'] = cuda_time
else:
result['cuda_time'] = -1.0
return result | a3765a88ccb10b3f0322f11f8205ecfdb7f98f38 | 7,079 |
def make_noise(fid, snr, decibels=True):
"""Given a synthetic FID, generate an array of normally distributed
complex noise with zero mean and a variance that abides by the desired
SNR.
Parameters
----------
fid : numpy.ndarray
Noiseless FID.
snr : float
The signal-to-noise ratio.
decibels : bool, default: True
If `True`, the snr is taken to be in units of decibels. If `False`,
it is taken to be simply the ratio of the singal power and noise
power.
Returns
    -------
noise : numpy.ndarray
"""
components = [
(fid, 'fid', 'ndarray'),
(snr, 'snr', 'float'),
(decibels, 'decibels', 'bool'),
]
ArgumentChecker(components)
size = fid.size
shape = fid.shape
# Compute the variance of the noise
if decibels:
var = np.real((np.sum(np.abs(fid) ** 2)) / (size * (20 ** (snr / 10))))
else:
var = np.real((np.sum(np.abs(fid) ** 2)) / (2 * size * snr))
# Make a number of noise instances and check which two are closest
# to the desired variance.
# These two are then taken as the real and imaginary noise components
instances = []
var_discrepancies = []
for _ in range(100):
instance = nrandom.normal(loc=0, scale=np.sqrt(var), size=shape)
instances.append(instance)
var_discrepancies.append(np.abs(np.var(instances) - var))
# Determine which instance's variance is the closest to the desired
# variance
first, second, *_ = np.argpartition(var_discrepancies, 1)
# The noise is constructed from the two closest arrays in a variance-sense
# to the desired SNR
return instances[first] + 1j * instances[second] | 823c9fee2c1a696a38b6a27406f51a27185460c1 | 7,080 |
def viterbi(prob_matrix):
""" find the most likely sequence of labels using the viterbi algorithm on prob_matrix """
TINY = 1e-6 # to avoid NaNs in logs
# if prob_matrix is 1D, make it 2D
if len(np.shape(prob_matrix)) == 1:
prob_matrix = [prob_matrix]
length = len(prob_matrix)
probs = np.zeros_like(prob_matrix)
backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1
for i in [0,1,2,3,4]:
probs[0][i] = np.log(prob_matrix[0][i]+TINY)
# {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single
for t in range(1, length):
# E, S -> B | B, M -> M | B, M -> E | E, S -> S
previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]
for i in range(5):
prevs = previous_of[i]
max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]
backpt[t][i] = max_id
probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]
seq = np.ones(length, 'int32') * -1
#print(probs[length-1])
seq[length-1] = np.argmax(probs[length-1])
#print(seq[length-1])
max_prob = probs[length-1][seq[length-1]]
for t in range(1, length):
seq[length-1-t] = backpt[length-t][seq[length-t]]
return seq | 50b28dcf7cedc75adb4a41cb9ccf2152af5f4b8f | 7,081 |
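# Hypothetical toy example for viterbi above: per-step label probabilities
# for a 3-step sequence over the 5 tag classes (values made up).
import numpy as np
prob_matrix = np.array([
    [0.05, 0.70, 0.05, 0.05, 0.15],
    [0.05, 0.10, 0.30, 0.45, 0.10],
    [0.05, 0.05, 0.60, 0.20, 0.10],
])
best_path = viterbi(prob_matrix)  # array of 3 state indices obeying the B/M/E/S transition rules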
def slsn_constraint(parameters):
"""
Place constraints on the magnetar rotational energy being larger than the total output energy,
and the that nebula phase does not begin till at least a 100 days.
:param parameters: dictionary of parameters
:return: converted_parameters dictionary where the violated samples are thrown out
"""
converted_parameters = parameters.copy()
mej = parameters['mej'] * solar_mass
vej = parameters['vej'] * km_cgs
kappa = parameters['kappa']
mass_ns = parameters['mass_ns']
p0 = parameters['p0']
kinetic_energy = 0.5 * mej * vej**2
rotational_energy = 2.6e52 * (mass_ns/1.4)**(3./2.) * p0**(-2)
tnebula = np.sqrt(3 * kappa * mej / (4 * np.pi * vej ** 2)) / 86400
neutrino_energy = 1e51
total_energy = kinetic_energy + neutrino_energy
# ensure rotational energy is greater than total output energy
converted_parameters['erot_constraint'] = rotational_energy - total_energy
# ensure t_nebula is greater than 100 days
converted_parameters['t_nebula_min'] = tnebula - 100
return converted_parameters | 9fd4cc37c783aa1afdc816edbc88c45132fb4026 | 7,082 |
def grover_circuit(n,o,iter):
"""Grover Search Algorithm
:param n: Number of qubits (not including ancilla)
    :param o: Oracle int to find
    :param iter: Number of Grover iterations to apply
    :return qc: Qiskit circuit
"""
def apply_hadamard(qc, qubits,a=None) -> None:
"""Apply a H-gate to 'qubits' in qc"""
for q in qubits:
qc.h(q)
if a is not None:
qc.h(a)
def initialize_bits(qc,qubits,a) -> None:
"Start qubits at 0 and ancilla bit at 1"
for q in qubits:
qc.reset(q)
qc.reset(a[0])
qc.x(a[0])
def apply_mean_circuit(qc, qubits) -> None:
"""Apply a H-gate to 'qubits' in qc"""
control_qubits = []
for q in qubits:
qc.h(q)
qc.x(q)
control_qubits.append(q)
cZ = control_qubits[-1]
control_qubits.pop()
qc.h(cZ)
qc.mcx(control_qubits,cZ)
qc.h(cZ)
for q in qubits:
qc.x(q)
qc.h(q)
def create_oracle(qc,qubit,ancilla,oracle,n) -> None:
"""Creates a quantum oracle."""
test_list = []
for q in qubit:
test_list.append(q)
_oracle_logic(qc, qubit, oracle,n)
qc.mcx(test_list,ancilla[0])
_oracle_logic(qc, qubit, oracle,n)
def _oracle_logic(qc, qubit, oracle,n) -> None:
if 0 <= oracle <= 2**len(qubit)-1:
bin_list = [int(i) for i in list('{0:0b}'.format(oracle))]
if len(bin_list) < n:
for _ in range(0,n-len(bin_list)):
bin_list.insert(0,0)
for i in range(0,len(bin_list)):
if bin_list[i] == 0:
                    qc.x(qubit[i])
else:
raise ValueError('Oracle must be between 0 and 2^n-1')
# print(f"Creating circuit with {n} qubits")
q = QuantumRegister(n, 'q')
a = QuantumRegister(1, 'a')
c = ClassicalRegister(n, 'c')
qc = QuantumCircuit(q,a,c)
i2b = "{0:b}".format(o)
# print(f"Oracle set to: {o} ({i2b})")
# print(" ")
initialize_bits(qc,q,a)
qc.barrier(q,a)
apply_hadamard(qc,q,a)
# print(f"Generating {iter} Grover module(s)")
# print("=====================================")
for _ in range(1,iter+1):
qc.barrier(q,a)
create_oracle(qc,q,a,o,n)
qc.barrier(q,a)
apply_mean_circuit(qc, q)
qc.barrier(q,a)
for i in range(0,len(q)):
qc.measure(q[i],c[len(q)-1-i])
return qc | fac61eda28a249e333dabd46c7d404603141c07c | 7,083 |
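# Hypothetical usage sketch for grover_circuit above (requires qiskit, as in
# the original module): search for the marked state |101> = 5 on 3 qubits
# with 2 Grover iterations.
qc = grover_circuit(n=3, o=5, iter=2)
print(qc.draw(output="text"))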
def reference_cluster(envs, in_path):
"""
Return set of all env in_paths referencing or
referenced by given in_path.
>>> cluster = sorted(reference_cluster([
... {'in_path': 'base', 'refs': []},
... {'in_path': 'test', 'refs': ['base']},
... {'in_path': 'local', 'refs': ['test']},
... ], 'test'))
>>> cluster == ['base', 'local', 'test']
True
"""
edges = [
set([env['in_path'], fix_reference_path(env['in_path'], ref)])
for env in envs
for ref in env['refs']
]
prev, cluster = set(), set([in_path])
while prev != cluster:
# While cluster grows
prev = set(cluster)
to_visit = []
for edge in edges:
if cluster & edge:
# Add adjacent nodes:
cluster |= edge
else:
# Leave only edges that are out
# of cluster for the next round:
to_visit.append(edge)
edges = to_visit
return cluster | 6398705dfb63c30de62b2eb900d88612e5144774 | 7,084 |
def scheme_apply(procedure, args, env):
"""Apply Scheme PROCEDURE to argument values ARGS in environment ENV."""
if isinstance(procedure, PrimitiveProcedure):
return apply_primitive(procedure, args, env)
elif isinstance(procedure, UserDefinedProcedure):
new_env = make_call_frame(procedure, args, env)
return eval_all(procedure.body, new_env)
else:
raise SchemeError("cannot call: {0}".format(str(procedure))) | 14879f29a5e8c3c5b7d4d41be35730eb66dbdc66 | 7,085 |
def _validate_num_clusters(num_clusters, initial_centers, num_rows):
"""
Validate the combination of the `num_clusters` and `initial_centers`
parameters in the Kmeans model create function. If the combination is
valid, determine and return the correct number of clusters.
Parameters
----------
num_clusters : int
Specified number of clusters.
initial_centers : SFrame
Specified initial cluster center locations, in SFrame form. If the
number of rows in this SFrame does not match `num_clusters`, there is a
problem.
num_rows : int
Number of rows in the input dataset.
Returns
-------
_num_clusters : int
The correct number of clusters to use going forward
"""
## Basic validation
if num_clusters is not None and not isinstance(num_clusters, int):
raise _ToolkitError("Parameter 'num_clusters' must be an integer.")
## Determine the correct number of clusters.
if initial_centers is None:
if num_clusters is None:
raise ValueError("Number of clusters cannot be determined from " +
"'num_clusters' or 'initial_centers'. You must " +
"specify one of these arguments.")
else:
_num_clusters = num_clusters
else:
num_centers = initial_centers.num_rows()
if num_clusters is None:
_num_clusters = num_centers
else:
if num_clusters != num_centers:
raise ValueError("The value of 'num_clusters' does not match " +
"the number of provided initial centers. " +
"Please provide only one of these arguments " +
"or ensure the values match.")
else:
_num_clusters = num_clusters
if _num_clusters > num_rows:
raise ValueError("The desired number of clusters exceeds the number " +
"of data points. Please set 'num_clusters' to be " +
"smaller than the number of data points.")
return _num_clusters | 67d0be234a97c33eb742c70e8d6bb30be4608ab2 | 7,086 |
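# Hypothetical usage sketch for _validate_num_clusters above: with no initial
# centers supplied, the requested cluster count is validated and returned.
k = _validate_num_clusters(num_clusters=3, initial_centers=None, num_rows=100)
assert k == 3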
import base64 as b64
import mimetypes
def urlinline(filename, mime=None):
"""
Load the file at "filename" and convert it into a data URI with the
given MIME type, or a guessed MIME type if no type is provided.
Base-64 encodes the data.
"""
infile = open(filename, 'rb')
text = infile.read()
infile.close()
    enc = b64.standard_b64encode(text).decode('ascii')
if mime is None:
mime, _ = mimetypes.guess_type(filename)
mime = mime or DEFAULT_MIME_TYPE
ret = "data:%s;base64,%s" % (mime, enc)
return ret | 4b8035944a7a25d5b3ce3bc8a8fbd0a4dd424447 | 7,087 |
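# Hypothetical usage sketch for urlinline above (writes a small temporary
# file; DEFAULT_MIME_TYPE comes from the original module and is only used
# when the MIME type cannot be guessed).
with open("hello.txt", "w") as fh:
    fh.write("hello")
print(urlinline("hello.txt"))                   # data:text/plain;base64,aGVsbG8=
print(urlinline("hello.txt", "application/x-demo"))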
def star_rating(new_rating=None, prev_rating=None):
"""
Generates the query to update the product's star ratings. Inc method is
from https://docs.mongodb.com/manual/reference/operator/update/inc/
"""
add_file = {
1: {"one_star": 1},
2: {"two_stars": 1},
3: {"three_stars": 1},
4: {"four_stars": 1},
5: {"five_stars": 1}
}
delete_file = {
1: {"one_star": -1},
2: {"two_stars": -1},
3: {"three_stars": -1},
4: {"four_stars": -1},
5: {"five_stars": -1}
}
if new_rating and prev_rating:
return {"$inc": {add_file[new_rating], delete_file[prev_rating]}}
elif new_rating:
return {"$inc": add_file[new_rating]}
else:
return {"$inc": delete_file[prev_rating]} | e50f8271dbbb8c2722729cce6a8f036c851c4e95 | 7,089 |
def check_encoder(value: EncoderArg) -> EncoderFactory:
"""Checks value and returns EncoderFactory object.
Returns:
d3rlpy.encoders.EncoderFactory: encoder factory object.
"""
if isinstance(value, EncoderFactory):
return value
if isinstance(value, str):
return create_encoder_factory(value)
raise ValueError("This argument must be str or EncoderFactory object.") | 5e23b483df8fbe190f1ac6ccf743bc783728adf8 | 7,090 |
import pathlib
def allowed_task_name(name: str) -> bool:
"""Determine whether a task, which is a 'non-core-OSCAL activity/directory is allowed.
args:
name: the task name which is assumed may take the form of a relative path for task/subtasks.
Returns:
Whether the task name is allowed or not allowed (interferes with assumed project directories such as catalogs).
"""
# Task must not use an OSCAL directory
# Task must not self-interfere with a project
pathed_name = pathlib.Path(name)
root_path = pathed_name.parts[0]
if root_path in const.MODEL_TYPE_TO_MODEL_DIR.values():
logger.error('Task name is the same as an OSCAL schema name.')
return False
elif root_path[0] == '.':
logger.error('Task name must not start with "."')
return False
elif pathed_name.suffix != '':
# Does it look like a file
        logger.error('Task name must not look like a file path (e.g. contain a suffix).')
return False
return True | 231d7a98f5d6b7059f5517283ec3bed35264050e | 7,091 |
def get_ignored_classes(uppercase, lowercase, digit):
"""
    Get the tuple of ignored classes based on the selected classes.
    :param uppercase: whether to keep uppercase classes
    :param lowercase: whether to keep lowercase classes
    :param digit: whether to keep digit classes
    :return: tuple of ignored classes
"""
# result placeholder
ignored = []
# add digit classes to the ignore list
if not digit:
ignored.append(dataset.get_classes('digit'))
# add uppercase classes to the ignore list
if not uppercase:
ignored.append(dataset.get_classes('uppercase'))
# add lowercase classes to the ignore list
if not lowercase:
ignored.append(dataset.get_classes('lowercase'))
# return tuple
return tuple(ignored) | 2a2380f4f984feb42ce1de912739fd395a8422bd | 7,092 |
import torch
def unscaled_prediction_rmse(model, input_tensor, label_tensor, scalar, loading_length=0, return_loading_error=False,
device=None):
"""
Prediction RMSE.
:param model: model
:param input_tensor: input tensor
:param label_tensor: label tensor
:param scalar: scalar for transforming output data
:param loading_length: time length used for loading the NARX
:param return_loading_error: return the loading RMSE with the multi-step ahead RMSE
:param device: specified device to use (Default: None - select what is available)
:return: prediction rmse
"""
# Create Network on GPU/CPU
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Training Data on GPU/CPU
input_tensor, label_tensor = input_tensor.to(device), label_tensor.to(device)
# Sort data for loading and k-step ahead predictions.
inputs, labels, itdl, otdl = init_tdl(model, input_tensor, label_tensor, device)
loading_labels, k_step_labels = labels[:, :loading_length, :], labels[:, loading_length:, :]
# Perform a k-step ahead prediction
k_step_outputs, loading_outputs = __multi_step_ahead_prediction(model, input_tensor, label_tensor,
loading_length, device)
if return_loading_error:
# Combine loading and multi-step predictions/labels
outputs = torch.cat([loading_outputs, k_step_outputs], dim=1)
labels = torch.cat([loading_labels, k_step_labels], dim=1)
else:
# Use the multi-step predictions/labels
outputs = k_step_outputs
labels = k_step_labels
labels = labels.cpu().data.numpy()
labels = labels.reshape((labels.shape[0], labels.shape[1]))
labels = (labels - scalar.min_[1]) / scalar.scale_[1]
outputs = outputs.cpu().data.numpy()
outputs = outputs.reshape((outputs.shape[0], outputs.shape[1]))
outputs = (outputs - scalar.min_[1]) / scalar.scale_[1]
error = labels - outputs
error = np.sqrt((np.power(error, 2)).mean(axis=0))
return error | ea7b0e2c2fd022cc7bcb466057feacf5a1fbaa00 | 7,093 |
import copy
def __copyList__(fromList, initialValues = None):
"""
Returns a copy of the provided list. Initial values must either be a single value, or
a list of exactly the same size as the provided list.
"""
if __isListType__(fromList) is False:
raise ValueError('The provided value to copy was not a list!')
fromList = copy.deepcopy(fromList)
if initialValues is not None:
initialValues = copy.deepcopy(initialValues)
if initialValues is None or __isNonStringIterableType__(initialValues) is False:
copySingleValue = True
elif __isNonStringIterableType__(initialValues) and len(initialValues) == 1 or __isListType__(initialValues) is False:
# Treat an initialValue object with 1 element the same as a non-iterable, so we could set every value to a list, or to a non-list value
copySingleValue = True
else:
if len(initialValues) != len(fromList):
raise ValueError('The initial values list must be the same size as the list to copy!')
else:
copySingleValue = False
returnList = fromList[:]
for itemIndex in range(len(returnList)):
if copySingleValue is True:
returnList[itemIndex] = initialValues
else:
returnList[itemIndex] = initialValues[itemIndex]
return returnList | 9f126a10795132b5d2ddaeef552c6e5abd8680ba | 7,094 |
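# Hypothetical usage sketch for __copyList__ above, assuming the module's
# __isListType__/__isNonStringIterableType__ helpers behave as their names
# suggest: reset every slot to a single value, or copy a same-sized list.
assert __copyList__([1, 2, 3], initialValues=0) == [0, 0, 0]
assert __copyList__([1, 2, 3], initialValues=[7, 8, 9]) == [7, 8, 9]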
import re
def build_or_pattern(patterns, escape=False):
"""Build a or pattern string from a list of possible patterns
"""
or_pattern = []
for pattern in patterns:
if not or_pattern:
or_pattern.append('(?:')
else:
or_pattern.append('|')
or_pattern.append('(?:%s)' % re.escape(pattern) if escape else pattern)
or_pattern.append(')')
return ''.join(or_pattern) | 225cc20504a85342694e14ea76b9bf3ed8b6d11b | 7,095 |
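# Hypothetical usage sketch for build_or_pattern above: build a regex
# alternation over literal markers.
import re
pattern = build_or_pattern(["s", "season"], escape=True)
assert pattern == '(?:(?:s)|(?:season))'
assert re.fullmatch(pattern, "season") is not None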