content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M) |
---|---|---|
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False,
encoder=None, encoder_kwargs=None, ignore_update_check=False):
"""Performs a series of automated data cleaning transformations on the provided training and testing data sets
Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
from only the training set, then applying those transformations to both the training and testing set.
By doing so, this function will prevent information leak from the training set into the testing set.
Parameters
----------
training_dataframe: pandas.DataFrame
Training data set
testing_dataframe: pandas.DataFrame
Testing data set
drop_nans: bool
Drop all rows that have a NaN in any column (default: False)
copy: bool
Make a copy of the data set (default: False)
    encoder: category_encoders transformer
        A valid category_encoders transformer class, instantiated with ``encoder_kwargs``
        and fitted to each categorical column. Default: None (LabelEncoder is used)
    encoder_kwargs: dict
        Keyword arguments used to instantiate the encoder. Default: None
ignore_update_check: bool
Do not check for the latest version of datacleaner
Returns
----------
output_training_dataframe: pandas.DataFrame
Cleaned training data set
output_testing_dataframe: pandas.DataFrame
Cleaned testing data set
"""
global update_checked
if ignore_update_check:
update_checked = True
if not update_checked:
update_check('datacleaner', __version__)
update_checked = True
if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values):
raise ValueError('The training and testing DataFrames do not have the same columns. '
'Make sure that you are providing the same columns.')
if copy:
training_dataframe = training_dataframe.copy()
testing_dataframe = testing_dataframe.copy()
if drop_nans:
training_dataframe.dropna(inplace=True)
testing_dataframe.dropna(inplace=True)
if encoder_kwargs is None:
encoder_kwargs = {}
for column in training_dataframe.columns.values:
# Replace NaNs with the median or mode of the column depending on the column type
try:
column_median = training_dataframe[column].median()
training_dataframe[column].fillna(column_median, inplace=True)
testing_dataframe[column].fillna(column_median, inplace=True)
except TypeError:
column_mode = training_dataframe[column].mode()[0]
training_dataframe[column].fillna(column_mode, inplace=True)
testing_dataframe[column].fillna(column_mode, inplace=True)
# Encode all strings with numerical equivalents
if str(training_dataframe[column].values.dtype) == 'object':
if encoder is not None:
column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values)
else:
column_encoder = LabelEncoder().fit(training_dataframe[column].values)
training_dataframe[column] = column_encoder.transform(training_dataframe[column].values)
testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values)
return training_dataframe, testing_dataframe | 37b8221193c05db97dde355e06313a9372cd8193 | 3,659,500 |
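# Illustrative usage sketch for autoclean_cv (not part of the original snippet): the CSV
# paths below are hypothetical; the import assumes the datacleaner package named in the
# docstring above.
import pandas as pd
from datacleaner import autoclean_cv

train_df = pd.read_csv("train.csv")   # hypothetical training file
test_df = pd.read_csv("test.csv")     # hypothetical testing file
# Transformations are learned on train_df only, then applied to both frames.
clean_train, clean_test = autoclean_cv(train_df, test_df, drop_nans=False, copy=True)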
import asyncio
def make_coroutine_from_tree(tree, filename="<aexec>", symbol="single",
local={}):
"""Make a coroutine from a tree structure."""
dct = {}
tree.body[0].args.args = list(map(make_arg, local))
exec(compile(tree, filename, symbol), dct)
return asyncio.coroutine(dct[CORO_NAME])(**local) | cbf4e0b0278abc0e929f4ed8b2a9c421b4e8f3c6 | 3,659,501 |
def update_Sigmai(Yi, Es, Vars):
"""
Return new Sigma_i: shape k
"""
return np.mean((Yi - Es) ** 2, axis=1) + np.mean(Vars, axis=1) | f2cb1fa7f5e6b48f033207ee6bb84b8e865c863c | 3,659,502 |
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_offsets = tf.cast(flat_offsets, tf.int64)
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
# https://github.com/tensorflow/tensorflow/issues/36236
output_tensor = tf.gather(flat_sequence_tensor*1, flat_positions)
return output_tensor | 583ed7ce925ace45dd2a6c9a78efd0360bd141e0 | 3,659,503 |
import typing
def check_sub_schema_dict(sub_schema: typing.Any) -> dict:
"""Check that a sub schema in an allOf is a dict."""
if not isinstance(sub_schema, dict):
raise exceptions.MalformedSchemaError(
"The elements of allOf must be dictionaries."
)
return sub_schema | b64313b28ab63b8342de7b0422cc8c9087a28462 | 3,659,504 |
def get_proto_root(workspace_root):
"""Gets the root protobuf directory.
Args:
workspace_root: context.label.workspace_root
Returns:
The directory relative to which generated include paths should be.
"""
if workspace_root:
return "/{}".format(workspace_root)
else:
return "" | 35cff0b28ee6c1893e5dba93593126c996ba72cc | 3,659,505 |
def bwimcp(J, K, x, tr=.2, alpha=.05):
"""
Multiple comparisons for interactions
in a split-plot design.
The analysis is done by taking difference scores
among all pairs of dependent groups and
determining which of
these differences differ across levels of Factor A
using trimmed means. FWE is controlled via Hochberg's
method. For MOM or M-estimators
(possibly not implemented yet), use spmcpi which
uses a bootstrap method
:param J: int
Number of J levels associated with Factor A
:param K: int
Number of K levels associated with Factor B
:param x: Pandas DataFrame
Each column represents a cell in the factorial design. For example,
a 2x3 design would correspond to a DataFrame with 6 columns
(levels of Factor A x levels of Factor B).
Order your columns according to the following pattern
(traversing each row in a matrix):
- the first column contains data for level 1 of Factor A
and level 1 of Factor B
- the second column contains data for level 1 of Factor A
and level 2 of Factor B
- column `K` contains the data for level 1 of Factor A
and level `K` of Factor B
- column `K` + 1 contains the data for level 2 of Factor A
and level 1 of Factor B
- and so on ...
:param tr: float
Proportion to trim (default is .2)
:param alpha: float
Alpha level (default is .05)
:return:
Dictionary of results
con: array
Contrast matrix
output: DataFrame
Difference score, p-value, and critical value for each contrast relating to the interaction
"""
x=pandas_to_arrays(x)
x=remove_nans_based_on_design(x, [J, K], 'between_within')
MJ = (J ** 2 - J) // 2
MK = (K ** 2 - K) // 2
JMK = J * MK
MJMK = MJ * MK
Jm = J - 1
#output = np.zeros([MJMK, 7])
output = np.zeros([MJMK, 4])
_, _, con = con2way(J,K)
m = np.array(np.arange(J*K)).reshape(J,K)
ic=0
test=np.array([])
for j in range(J):
for jj in range(J):
if j < jj:
for k in range(K):
for kk in range(K):
if k<kk:
#output[ic, 0]=j
#output[ic, 1]=jj
#output[ic, 2]=k
output[ic, 0]=ic
x1 = x[m[j, k]] - x[m[j, kk]]
x2 = x[m[jj, k]] - x[m[jj, kk]]
#print(f'X1 comparing cells {j, k} to {j, kk}')
#print(f'X2 comparing cells {jj, k} to {jj, kk}')
temp = yuen(x1, x2)
output[ic, 1] = trim_mean(x1, tr) - trim_mean(x2, tr)
#output[ic, 4] = trim_mean(x1, tr) - trim_mean(x2, tr)
test=np.append(test, temp['p_value'])
output[ic, 2] = test[ic]
#output[ic, 5] = test[ic]
ic+=1
ncon = len(test)
dvec = alpha / np.arange(1, ncon+1)
temp2 = (-test).argsort()
zvec = dvec[0:ncon]
#output[temp2, 6] = zvec
output[temp2, 3] = zvec
#output[:, 6] = output[:, 6]
output[:, 3] = output[:, 3]
col_names=["con_num", "psihat", "p_value", "p_crit"]
#col_names=["A_x", "A_y", "B_x", "B_y", "psihat", "p_value", "p_crit"]
    results={'con': con, 'output': pd.DataFrame(output, columns=col_names)}
return results | 82d2b77464e5bc37fc101624ed0d88205ab11ff9 | 3,659,506 |
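# Illustrative call sketch for bwimcp on a 2x3 split-plot design (column order follows
# the docstring above; the column names and data are made up, and the module-level
# helpers it relies on — pandas_to_arrays, con2way, yuen, trim_mean — are assumed available).
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(30, 6)),
                  columns=['A1_B1', 'A1_B2', 'A1_B3', 'A2_B1', 'A2_B2', 'A2_B3'])
res = bwimcp(J=2, K=3, x=df, tr=.2, alpha=.05)
print(res['output'])  # one row per interaction contrast with psihat, p_value, p_crit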
def trigger_decoder(mode: str, trigger_path: str=None) -> tuple:
"""Trigger Decoder.
Given a mode of operation (calibration, copy phrase, etc) and
a path to the trigger location (*.txt file), this function
will split into symbols (A, ..., Z), timing info (32.222), and
targetness (target, nontarget). It will also extract any saved
offset information and pass that back.
PARAMETERS
----------
:param: mode: mode of bci operation. Note the mode changes how triggers
are saved.
:param: trigger_path: [Optional] path to triggers.txt file
:return: tuple: symbol_info, trial_target_info, timing_info, offset.
"""
# Load triggers.txt
if not trigger_path:
trigger_path = load_txt_data()
# Get every line of trigger.txt
with open(trigger_path, 'r+') as text_file:
        # most trigger files have three columns:
# SYMBOL, TARGETNESS_INFO[OPTIONAL], TIMING
trigger_txt = [line.split() for line in text_file]
# extract stimuli from the text
stimuli_triggers = [line for line in trigger_txt
if line[1] == 'target' or
line[1] == 'nontarget']
    # from the stimuli array, pull out the symbol information
symbol_info = list(map(lambda x: x[0], stimuli_triggers))
# If operating mode is free spell, it only has 2 columns
# otherwise, it has 3
if mode != 'free_spell':
trial_target_info = list(map(lambda x: x[1], stimuli_triggers))
timing_info = list(map(lambda x: eval(x[2]), stimuli_triggers))
else:
trial_target_info = None
timing_info = list(map(lambda x: eval(x[1]), stimuli_triggers))
# Get any offset or calibration triggers
offset_array = [line[2] for line in trigger_txt
if line[0] == 'offset']
calib_trigger_array = [line[2] for line in trigger_txt
if line[0] == 'calibration_trigger']
# If present, calculate the offset between the DAQ and Triggers from display
if len(offset_array) == 1 and len(calib_trigger_array) == 1:
# Extract the offset and calibration trigger time
offset_time = float(offset_array[0])
calib_trigger_time = float(calib_trigger_array[0])
# Calculate the offset (ASSUMES DAQ STARTED FIRST!)
offset = offset_time - calib_trigger_time
# Otherwise, assume no observed offset
else:
offset = 0
return symbol_info, trial_target_info, timing_info, offset | e4d19203e655173f638dc38c0123f88c7342aed1 | 3,659,507 |
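# Sketch of the triggers.txt layout this decoder expects (one "SYMBOL TARGETNESS TIME"
# line per stimulus, plus optional offset/calibration lines). The values below are made
# up purely for illustration.
sample_triggers = """\
calibration_trigger calib 1.2345
offset offset_correction 1.5000
A target 2.2220
B nontarget 2.4440
"""
with open('triggers.txt', 'w') as f:
    f.write(sample_triggers)
symbols, targetness, timings, offset = trigger_decoder(mode='calibration',
                                                       trigger_path='triggers.txt')
# symbols == ['A', 'B'], targetness == ['target', 'nontarget'], offset == 1.5 - 1.2345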
def method_comparison(filename=None, extension="png", usetex=False,
passed_ax=None, **kwargs):
"""
Create a plot comparing how estimated redshift changes as a
function of dispersion measure for each DM-z relation.
Parameters
----------
filename: string or None, optional
The filename of the saved figure. Default: *None*
extension: string, optional
The format to save the figure. e.g "png", "pdf", "eps", etc...
Default: "png"
usetex: bool, optional
        Use LaTeX for fonts.
    passed_ax: matplotlib.axes.Axes or None, optional
        If provided, the comparison is drawn onto this existing axis and the
        axis is returned instead of a new figure.
    Generates
---------
A figure displaying how estimated redshift changes as a function of
dispersion measure for each of the different cosmologies.
"""
set_rc_params(usetex)
if passed_ax:
ax = passed_ax
else:
fig = plt.figure(figsize=(8, 8), constrained_layout=True)
ax = fig.add_subplot(111)
method_list = methods.available_methods()
dm_vals = np.linspace(0, 3000, 1000)
colours = ["#1b9e77", "#d95f02", "#7570b3"]
label = [r"$\rm{Ioka 2003}$", r"$\rm{Inoue 2004}$", r"$\rm{Zhang 2018}$"]
for j, method in enumerate(method_list):
z_vals = np.zeros(len(dm_vals))
if 'cosmology' in kwargs:
cosmology = kwargs['cosmology']
else:
cosmology = 'Planck18'
table_name = "".join(["_".join([method, cosmology]), ".npz"])
lookup_table = table.load(table_name)
for i, dm in enumerate(dm_vals):
z_vals[i] = table.get_z_from_table(dm, lookup_table)
ax.plot(dm_vals, z_vals, colours[j], label=label[j], **kwargs)
if not passed_ax:
ax.set_ylabel(r"$\rm{Redshift}$")
ax.set_xlabel(r"$\rm{DM\ \left[pc \ cm^{-3}\right]}$")
ax.legend(loc='lower right', frameon=False)
if filename is not None:
plt.savefig(".".join([filename, extension]))
if passed_ax:
return ax
else:
return fig | 8c3714cca3aac5f0f7893dc981b68265bf6cea9f | 3,659,508 |
def logCompression(pilImg):
"""Does log compression processing on a photo
Args:
pilImg (PIL Image format image): Image to be processed
"""
npImg = PILtoNumpy(pilImg)
c = 255 / (np.log10(1 + np.amax(npImg)))
for all_pixels in np.nditer(npImg, op_flags=['readwrite']):
all_pixels[...] = c * np.log10(1 + all_pixels)
return NumpytoPIL(npImg) | d6ab559182e7c836823d4c51fb6af395c1cd875f | 3,659,509 |
def quantile_turnover(quantile_factor, quantile, period=1):
"""
Computes the proportion of names in a factor quantile that were
not in that quantile in the previous period.
Parameters
----------
quantile_factor : pd.Series
DataFrame with date, asset and factor quantile.
quantile : int
Quantile on which to perform turnover analysis.
period: int, optional
Number of days over which to calculate the turnover.
Returns
-------
quant_turnover : pd.Series
Period by period turnover for that quantile.
"""
quant_names = quantile_factor[quantile_factor == quantile]
quant_name_sets = quant_names.groupby(level=['date']).apply(
lambda x: set(x.index.get_level_values('asset')))
name_shifted = quant_name_sets.shift(period)
new_names = (quant_name_sets - name_shifted).dropna()
quant_turnover = new_names.apply(
lambda x: len(x)) / quant_name_sets.apply(lambda x: len(x))
quant_turnover.name = quantile
return quant_turnover | 6c7b2afdd4c4f0a2dbf38064d2d8664a25370ca2 | 3,659,510 |
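# Minimal illustrative input for quantile_turnover: a Series of factor quantiles indexed
# by (date, asset), in the alphalens style described above. Index names and values are
# hypothetical.
import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(['2021-01-04', '2021-01-05']), ['AAA', 'BBB', 'CCC']],
    names=['date', 'asset'])
quantiles = pd.Series([1, 2, 2, 2, 1, 2], index=idx)
# On the second date one of the two quantile-2 names is new, so turnover is 0.5.
print(quantile_turnover(quantiles, quantile=2, period=1))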
def dmp_div(f, g, u, K):
"""
Polynomial division with remainder in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_div(x**2 + x*y, 2*x + 2)
(0, x**2 + x*y)
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_div(x**2 + x*y, 2*x + 2)
(1/2*x + 1/2*y - 1/2, -y + 1)
"""
if K.is_Field:
return dmp_ff_div(f, g, u, K)
else:
return dmp_rr_div(f, g, u, K) | 1b8f2b2b9d57899862234233a70e7e76100b86be | 3,659,511 |
def is_designated_holiday(timestamp):
"""
Returns True if the date is one of Piedmont’s "designated holidays":
- New Years Day (January 1st)
- Memorial Day (last Monday of May)
- Independence Day (July 4th)
- Labor Day (First Monday of September)
- Thanksgiving Day (4th Thursday in November)
- Christmas Day (December 25th)
"""
dow = timestamp.weekday()
day = timestamp.day
month = timestamp.month
    if month == JANUARY and day == 1:
        return True
    elif month == MAY and dow == MONDAY and day >= 25:
        return True
elif month == JULY and day == 4:
return True
elif month == SEPTEMBER and dow == MONDAY and day < 8:
return True
elif month == NOVEMBER and dow == THURSDAY and 21 < day < 29:
return True
elif month == DECEMBER and day == 25:
return True
else:
return False | e6137ac2c3258a3e51294ff432971c04f56137ec | 3,659,512 |
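# Quick illustrative checks (assumes the month/day-of-week constants such as JULY and
# MONDAY are defined at module level, with MONDAY following datetime.weekday() where
# Monday == 0).
from datetime import datetime

print(is_designated_holiday(datetime(2022, 7, 4)))   # Independence Day -> True
print(is_designated_holiday(datetime(2022, 7, 5)))   # ordinary day -> False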
def check(val, desc=None, as_warn=False) -> SimpleAssertions:
"""
function based assertion call
:param val: val to check
:param desc: optional, description of val
:param as_warn: if set, convert assertion error to warning message
:return: assertionClass
"""
return SimpleAssertions(as_warn=as_warn).check(val, desc) | 2115a0b16387cea0fef483a26a6c27daaf72387e | 3,659,513 |
def ChangeExtension(filename, newExtension):
"""ChangeExtension(filename, newExtension) -> str
Replaces the extension of the filename with the given one.
If the given filename has no extension, the new extension is
simply appended.
arguments:
filename
string corresponding to the filename whose extension to change.
newExtension
string corresponding to the new extension to append. Do not
prepend with a period ('.').
returns:
string corresponding to the new filename.
"""
try:
# Isolate the filename
slashIndex = filename.rfind('/')
backslashIndex = filename.rfind('\\')
if (backslashIndex > slashIndex):
            slashIndex = backslashIndex
# Look for an existing extension
periodIndex = filename.rfind('.')
if (periodIndex > slashIndex):
return filename[0 : periodIndex] + "." + newExtension
else:
return filename + "." + newExtension
    except IndexError:
return "" | 0909060e01226520280aeabde906ab9a8f0dfc5d | 3,659,514 |
def file_based_input_fn_builder(input_file,
seq_length,
is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_masks": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"sent_label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record,
name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32. So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100, seed=np.random.randint(10000))
d = d.apply(tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn | 733ddf8b7add0cf9c610537cab0c31172260f0de | 3,659,515 |
def synchrotron_thin_spectrum(freqs, ne, te, bfield):
"""Optically thin (unobsorbed) synchrotron spectrum.
Units of erg/cm^3/s/Hz
NY95b Eq 3.9
"""
const = 4.43e-30 # erg/cm^3/s/Hz
theta_e = K_BLTZ * te / (MELC * SPLC * SPLC)
v0 = QELC * bfield / (2*np.pi*MELC*SPLC)
xm = 2*freqs/(3*v0*np.square(theta_e))
iprime = _synch_fit_func_iprime(xm)
esyn = const * 4*np.pi*ne*freqs*iprime/sp.special.kn(2, 1/theta_e)
return esyn | 1334cf0382eecd298472b4717b220a7ac3e96d0e | 3,659,516 |
import os
from pathlib import Path
# Module-level debug switch controlling the optional raster preview below.
DEBUG = False
def extract_val_setup(timestamp, lat, lon, dataPath = "Data/IceData/"):
""" Extracts a timestamped value from a NSIDC GeoTIFF File
Inputs:
timestamp = datetime struct of sample
lat = sample latitude
lon = sample longitude
dataPath = path to GeoTIFF files
Outputs:
GeoTIFF raw value - please see https://nsidc.org/sites/nsidc.org/files/G02135-V3.0_0.pdf
"""
local_path = os.path.join(os.getcwd(), dataPath)
Path(local_path).mkdir(parents=True, exist_ok=True)
if lat < 0:
filename = generate_NSIDC_filename(timestamp, "S")
area = areas[0]
elif lat >= 0:
filename = generate_NSIDC_filename(timestamp, "N")
area = areas[1]
else:
print("=== Invalid Ice Area? ===")
raise ValueError
local_filename = local_path + filename
dataset = rasterio.open(local_filename)
if DEBUG:
rasterio.plot.show(dataset)
ice_data = dataset.read(1)
rev_xform = ~dataset.transform
outProj = Proj(dataset.crs)
inProj = Proj('epsg:4326')
coordxform = Transformer.from_proj(inProj, outProj)
# print("=== Proj Setup Complete ===")
return [ice_data, coordxform, rev_xform, area] | b3af95af7edd50e18bf5a1bf8e760cb0f12fda02 | 3,659,517 |
import base64
def create_message(service, to, subject, message_text):
"""Create a message for an email.
Args:
        service: Authorized Gmail API service instance; the sender address is read from its profile.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
Returns:
An object containing a base64url encoded email object.
"""
message = MIMEText(message_text)
users = service.users()
myProfile = users.getProfile(userId='me').execute()
message['to'] = to
message['from'] = myProfile['emailAddress']
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()} | 6c6df7b1825c13cb8016840527b4dd81ac26d266 | 3,659,518 |
def numpy_ndarray(nb_arr):
"""Return a copy of numba DeviceNDArray data as a numpy.ndarray.
"""
return nb_arr.copy_to_host() | d6ee1c62428783344fe6232ef229a6dabc8f2a2f | 3,659,519 |
def convert_to_dict(my_keys, my_values):
"""Merge a given list of keys and a list of values into a dictionary.
Args:
my_keys (list): A list of keys
my_values (list): A list corresponding values
Returns:
Dict: Dictionary of the list of keys mapped to the list of values
"""
return dict(zip(my_keys, my_values)) | e00690d27770539e6b9d2166835f6bd1b9c11c5a | 3,659,520 |
def add_vit(request):
"""
Add a new vit with API, currently image and video are not supported
"""
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
form = VitForm(request.POST)
if form.is_valid():
vit = form.save(commit=False)
vit.user = request.user
vit.save()
return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201)
else:
return JsonResponse({'error': 'No vit body provided'}, status=400)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400) | 726776e036678cb79051d6ac800d5d883b947320 | 3,659,521 |
def long_control_state_trans(active, long_control_state, v_ego, v_target, v_pid,
output_gb, brake_pressed, cruise_standstill, min_speed_can):
"""Update longitudinal control state machine"""
stopping_target_speed = min_speed_can + STOPPING_TARGET_SPEED_OFFSET
stopping_condition = (v_ego < 2.0 and cruise_standstill) or \
(v_ego < STOPPING_EGO_SPEED and
((v_pid < stopping_target_speed and v_target < stopping_target_speed) or
brake_pressed))
starting_condition = v_target > STARTING_TARGET_SPEED and not cruise_standstill
if not active:
long_control_state = LongCtrlState.off
else:
if long_control_state == LongCtrlState.off:
if active:
long_control_state = LongCtrlState.pid
elif long_control_state == LongCtrlState.pid:
if stopping_condition:
long_control_state = LongCtrlState.stopping
elif long_control_state == LongCtrlState.stopping:
if starting_condition:
long_control_state = LongCtrlState.starting
elif long_control_state == LongCtrlState.starting:
if stopping_condition:
long_control_state = LongCtrlState.stopping
elif output_gb >= -BRAKE_THRESHOLD_TO_PID:
long_control_state = LongCtrlState.pid
return long_control_state | f13a5db692ce92fe36204eade10ebb2d54b9caed | 3,659,522 |
def load_group_to_namedtuple(group: h5py.Group):
"""Returns namedtuple with name of group and key: values of group attrs
e.g. srs1 group which has gpib: 1... will be returned as an srs1 namedtuple with .gpib etc
"""
# Check it was stored as a namedTuple
if group.attrs.get('description', None) != 'NamedTuple':
raise ValueError(
f'Trying to load_group_to_named_tuple which has description: {group.attrs.get("description", None)}')
# Get the name of the NamedTuple either through the stored name or the group name
name = group.attrs.get('NT_name', None)
if name is None:
logger.warning('Did not find "name" attribute for NamedTuple, using folder name instead')
name = group.name.split('/')[-1]
# d = {key: val for key, val in group.attrs.items()}
d = {key: get_attr(group, key) for key in group.attrs.keys()}
# Remove HDF only descriptors
for k in ['description', 'NT_name']:
if k in d.keys():
del d[k]
# Make the NamedTuple
ntuple = namedtuple(name, d.keys())
filled_tuple = ntuple(**d) # Put values into tuple
return filled_tuple | e33c0e1b367ddd2ebb745397d473c00452ba853f | 3,659,523 |
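# Sketch of the storage convention this loader expects: an HDF5 group whose attrs carry
# 'description' == 'NamedTuple', an optional 'NT_name', and one attr per field. The
# writer below is an illustrative assumption, not part of the original module, and
# `get_attr` is assumed to be the module's own attribute reader.
import h5py

with h5py.File('example.h5', 'w') as f:
    g = f.create_group('srs1')
    g.attrs['description'] = 'NamedTuple'
    g.attrs['NT_name'] = 'srs1'
    g.attrs['gpib'] = 1
with h5py.File('example.h5', 'r') as f:
    nt = load_group_to_namedtuple(f['srs1'])
    print(nt.gpib)  # -> 1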
import json
def export_json(blocks=None, subsections=False):
"""
Returns JSON representation of parsed config structure
:param blocks: List of blocks to export
:param subsections: Export all subblocks
:return: JSON-dumped string
"""
if blocks is not None:
blocks = [_canonicalize_blockid(b) for b in blocks]
if subsections:
blocks = get_subblocks(blocks)
return json.dumps(_config_dict(blocks)) | 90ae2ee10d6d23f091d079bd87478fa10d3a4083 | 3,659,524 |
def get_dummy_message(text):
"""Get a dummy message with a custom text"""
return botogram.objects.messages.Message({
"message_id": 1,
"from": {"id": 123, "first_name": "Nobody"},
"chat": {"id": -123, "type": "chat", "title": "Something"},
"date": 1,
"text": text,
}) | 0f39712381157b46aed345ef6b46c6b3cfe32d95 | 3,659,525 |
import time
def list_ga4_entities(admin_api):
"""Get a dictionary of GA4 entity settings based on type.
Args:
admin_api: The Admin API object.
Returns:
A dictionary of GA4 entity setting lists.
"""
entities = {
'ga4_account_summaries': [],
'ga4_accounts': [],
'ga4_properties': [],
'ga4_data_streams': [],
'ga4_measurement_protocol_secrets': [],
'ga4_conversion_events': [],
'ga4_custom_dimensions': [],
'ga4_custom_metrics': [],
'ga4_dv360_link_proposals': [],
'ga4_dv360_links': [],
'ga4_firebase_links': [],
'ga4_google_ads_links': []
}
for account_summary in admin_api.list_account_summaries():
a_dict = {
'name': account_summary.name,
'display_name': account_summary.display_name,
'account': account_summary.account,
'property_summaries': []
}
for property_summary in account_summary.property_summaries:
p_dict = {
'property': property_summary.property,
'display_name': property_summary.display_name
}
a_dict['property_summaries'].append(p_dict)
entities['ga4_account_summaries'].append(a_dict)
time.sleep(REQUEST_DELAY)
for account in admin_api.list_accounts():
account_dict = {
'name': account.name,
'display_name': account.display_name,
'create_time': account.create_time,
'update_time': account.update_time,
'region_code': account.region_code,
'deleted': account.deleted
}
entities['ga4_accounts'].append(account_dict)
time.sleep(REQUEST_DELAY)
for account_summary in entities['ga4_account_summaries']:
prop_request = ListPropertiesRequest(
filter=f"parent:{account_summary['account']}")
for prop in admin_api.list_properties(prop_request):
time.sleep(REQUEST_DELAY)
data_retention_settings = admin_api.get_data_retention_settings(
name=(prop.name + '/dataRetentionSettings'))
time.sleep(REQUEST_DELAY)
google_signals_settings = admin_api.get_google_signals_settings(
name=(prop.name + '/googleSignalsSettings'))
ic_enum = prop.industry_category
sl_enum = prop.service_level
gss_state_enum = google_signals_settings.state
gss_consent_enum = google_signals_settings.consent
edr_enum = data_retention_settings.event_data_retention
prop_dict = {
'name': prop.name,
'create_time': prop.create_time,
'update_time': prop.update_time,
'parent': prop.parent,
'display_name': prop.display_name,
'industry_category': IndustryCategory(ic_enum).name,
'time_zone': prop.time_zone,
'currency_code': prop.currency_code,
'service_level': ServiceLevel(sl_enum).name,
'delete_time': prop.delete_time,
'expire_time': prop.expire_time,
'account': account_summary['account'],
'data_sharing_settings': {
'name': data_retention_settings.name,
'event_data_retention': (DataRetentionSettings
.RetentionDuration(edr_enum).name),
'reset_user_data_on_new_activity':
data_retention_settings.reset_user_data_on_new_activity
},
'google_signals_settings': {
'name': google_signals_settings.name,
'state': GoogleSignalsState(gss_state_enum).name,
'consent': GoogleSignalsConsent(gss_consent_enum).name
}
}
entities['ga4_properties'].append(prop_dict)
for property_summary in account_summary['property_summaries']:
time.sleep(REQUEST_DELAY)
for data_stream in admin_api.list_data_streams(
parent=property_summary['property']):
data_stream_dict = {
'name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'display_name': data_stream.display_name,
'create_time': data_stream.create_time,
'update_time': data_stream.update_time,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
if data_stream.web_stream_data != None:
data_stream_dict['web_stream_data'] = {
'measurment_id': data_stream.web_stream_data.measurement_id,
'firebase_app_id': data_stream.web_stream_data.firebase_app_id,
'default_uri': data_stream.web_stream_data.default_uri
}
time.sleep(REQUEST_DELAY)
for mps in admin_api.list_measurement_protocol_secrets(
parent=data_stream.name):
mps_dict = {
'name': mps.name,
'display_name': mps.display_name,
'secret_value': mps.secret_value,
'stream_name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_measurement_protocol_secrets'].append(mps_dict)
if data_stream.android_app_stream_data != None:
data_stream_dict['android_app_stream_data'] = {
'firebase_app_id': (data_stream
.android_app_stream_data.firebase_app_id),
'package_name': data_stream.android_app_stream_data.package_name
}
time.sleep(REQUEST_DELAY)
for mps in admin_api.list_measurement_protocol_secrets(
parent=data_stream.name):
mps_dict = {
'name': mps.name,
'display_name': mps.display_name,
'secret_value': mps.secret_value,
'stream_name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_measurement_protocol_secrets'].append(mps_dict)
if data_stream.ios_app_stream_data != None:
data_stream_dict['ios_app_stream_data'] = {
'firebase_app_id': data_stream.ios_app_stream_data.firebase_app_id,
'bundle_id': data_stream.ios_app_stream_data.bundle_id
}
time.sleep(REQUEST_DELAY)
for mps in admin_api.list_measurement_protocol_secrets(
parent=data_stream.name):
mps_dict = {
'name': mps.name,
'display_name': mps.display_name,
'secret_value': mps.secret_value,
'stream_name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_measurement_protocol_secrets'].append(mps_dict)
entities['ga4_data_streams'].append(data_stream_dict)
time.sleep(REQUEST_DELAY)
for event in admin_api.list_conversion_events(
parent=property_summary['property']):
event_dict = {
'name': event.name,
'event_name': event.event_name,
'create_time': event.create_time,
'deletable': event.deletable,
'custom': event.custom,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_conversion_events'].append(event_dict)
time.sleep(REQUEST_DELAY)
for cd in admin_api.list_custom_dimensions(
parent=property_summary['property']):
cd_dict = {
'name': cd.name,
'parameter_name': cd.parameter_name,
'display_name': cd.display_name,
'description': cd.description,
'scope': cd.scope,
'disallow_ads_personalization': cd.disallow_ads_personalization,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_custom_dimensions'].append(cd_dict)
time.sleep(REQUEST_DELAY)
for cm in admin_api.list_custom_metrics(
parent=property_summary['property']):
cm_dict = {
'name': cm.name,
'parameter_name': cm.parameter_name,
'display_name': cm.display_name,
'description': cm.description,
'scope': cm.scope,
'measurement_unit': cm.measurement_unit,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_custom_metrics'].append(cm_dict)
time.sleep(REQUEST_DELAY)
for link in admin_api.list_google_ads_links(
parent=property_summary['property']):
link_dict = {
'name': link.name,
'customer_id': link.customer_id,
'can_manage_clients': link.can_manage_clients,
'ads_personalization_enabled': link.ads_personalization_enabled,
'create_time': link.create_time,
'update_time': link.update_time,
'creator_email_address': link.creator_email_address,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_google_ads_links'].append(link_dict)
time.sleep(REQUEST_DELAY)
for link in admin_api.list_firebase_links(
parent=property_summary['property']):
link_dict = {
'name': link.name,
'project': link.project,
'create_time': link.create_time,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_firebase_links'].append(link_dict)
time.sleep(REQUEST_DELAY)
for link in admin_api.list_display_video360_advertiser_links(
parent=property_summary['property']):
link_dict = {
'name': link.name,
'advertiser_id': link.advertiser_id,
'advertiser_display_name': link.advertiser_display_name,
'ads_personalization_enabled': link.ads_personalization_enabled,
'campaign_data_sharing_enabled': link.campaign_data_sharing_enabled,
'cost_data_sharing_enabled': link.cost_data_sharing_enabled,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_dv360_links'].append(link_dict)
time.sleep(REQUEST_DELAY)
for proposal in (
admin_api.list_display_video360_advertiser_link_proposals(
parent=property_summary['property'])):
lpip_enum = (proposal.link_proposal_status_details
.link_proposal_initiating_product)
lps_enum = (proposal.link_proposal_status_details
.link_proposal_state)
proposals_dict = {
'name':
proposal.name,
'advertiser_id':
            proposal.advertiser_id,
'link_proposal_status_details': {
'link_proposal_initiating_product':
LinkProposalInitiatingProduct(lpip_enum).name,
'requestor_email':
proposal.link_proposal_status_details.requestor_email,
'link_proposal_state': LinkProposalState(lps_enum).name
},
'advertiser_display_name':
proposal.advertiser_display_name,
'validation_email':
proposal.validation_email,
'ads_personalization_enabled':
proposal.ads_personalization_enabled,
'campaign_data_sharing_enabled':
proposal.campaign_data_sharing_enabled,
'cost_data_sharing_enabled':
proposal.cost_data_sharing_enabled,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
        entities['ga4_dv360_link_proposals'].append(proposals_dict)
return entities | 83b11c1a001a593da07f6aeb4333bad623bb7ee4 | 3,659,526 |
import textwrap
import os
from time import gmtime, strftime
from PIL import Image, ImageDraw, ImageFont
def text_detection_background(text, background=None, hsize=15, vsize=15):
""" Given a string TEXT, generate a picture with text in it
arguments:
TEXT -- a string to be displayed
HSIZE -- maximum number of characters in one line
VSIZE -- maximum number of lines
background -- a file for background. If None, then use white
    return: a single-element list containing the path to the generated image
"""
img_hsize = int(hsize * 640 / 15)
img_vsize = int(vsize * 480 / 15)
if not background:
img = Image.new('RGB', (img_hsize, img_vsize), color = "white")
color = (0,0,0)
else:
img = Image.open(background)
# width, height = img.size
# left, top, right, bottom = (0, 0, min(width, img_hsize), min(height, img_vsize))
# img = img.crop((left, top, right, bottom))
img = img.resize((img_hsize, img_vsize))
color = decide_font_color(background, pos=(30,30))
# Uses this font, can change to others if needed
fnt = ImageFont.truetype(os.path.join(FONT_SRC, "Times_CE_Regular.ttf"), 30)
d = ImageDraw.Draw(img)
para = textwrap.wrap(text, width=3*hsize)
if len(para)<=0:
para = [""]
try:
d.multiline_text((30, 30), '\n'.join(para), fill=color, font=fnt)
except:
d.multiline_text((30, 30), '\n'.join(para), fill=(0), font=fnt)
# d.text((30,30), text, font=fnt, fill=(0,0,0))
filename = os.path.join(TEXT_SRC, text+strftime("-%Y-%m-%d-%H-%M-%S.png", gmtime()))
img.save(filename)
return [filename] | d65677588130ffab84df210e772208fbfddba99f | 3,659,527 |
def print_sig(expr):
"""
Arguments:
- `expr`:
"""
return "{0!s} × {1!s}".format(expr.dom, expr.body) | be8d6fb1ad2256e2a825e383859f72db93318864 | 3,659,528 |
def is_grounded_concept(c: Concept) -> bool:
""" Check if a concept is grounded """
return (
"UN" in c.db_refs
and c.db_refs["UN"][0][0].split("/")[1] != "properties"
) | 2447b289cec20efc2aa359f37a795fd231004030 | 3,659,529 |
def _get_form(app, parent_form, factory_method, force_disable_csrf=False):
"""Create and fill a form."""
class AForm(parent_form):
pass
with app.test_request_context():
extra = _update_with_csrf_disabled() if force_disable_csrf else {}
RF = factory_method(AForm)
rf = RF(**extra)
rf.profile.username.data = "my username"
rf.profile.full_name.data = "My full name"
rf.validate()
return rf | b109d983dcf123812ede664719ab56f5462e84d4 | 3,659,530 |
def get_root_disk_size():
""" Get size of the root disk """
context = pyudev.Context()
rootfs_node = get_rootfs_node()
size_gib = 0
for device in context.list_devices(DEVTYPE='disk'):
# /dev/nvmeXn1 259 are for NVME devices
major = device['MAJOR']
if (major == '8' or major == '3' or major == '253' or
major == '259'):
devname = device['DEVNAME']
if devname == rootfs_node:
try:
size_gib = parse_fdisk(devname)
except Exception as e:
LOG.error("Could not retrieve disk size - %s " % e)
# Do not break config script, just return size 0
break
break
return size_gib | 4c01e189dfb4460d118fbd9b94c6a07e420c3bb1 | 3,659,531 |
import numpy
from io import BytesIO
from PIL import Image as Img
def convert_to_premultiplied_png(file):
"""
http://stackoverflow.com/questions/6591361/method-for-converting-pngs-to-premultiplied-alpha
"""
logger.info("converting to premultiplied alpha")
im = Img.open(file).convert('RGBA')
    a = numpy.frombuffer(im.tobytes(), dtype=numpy.uint8)
a = a.astype(numpy.float64)
alpha_layer = a[3::4] / 255.0
a[::4] *= alpha_layer
a[1::4] *= alpha_layer
a[2::4] *= alpha_layer
    im = Img.frombytes("RGBA", im.size, a.astype(numpy.uint8).tobytes())
f = BytesIO()
im.save(f, 'png')
f.seek(0)
return f | 2a2ebf9e3d1152e2d143ba799aab0ff0927653a8 | 3,659,532 |
def DSQuery(dstype, objectname, attribute=None):
"""DirectoryServices query.
Args:
dstype: The type of objects to query. user, group.
objectname: the object to query.
attribute: the optional attribute to query.
Returns:
If an attribute is specified, the value of the attribute. Otherwise, the
entire plist.
Raises:
DSException: Cannot query DirectoryServices.
"""
ds_path = '/%ss/%s' % (dstype.capitalize(), objectname)
cmd = [_DSCL, '-plist', '.', '-read', ds_path]
if attribute:
cmd.append(attribute)
(stdout, stderr, returncode) = RunProcess(cmd)
if returncode:
raise DSException('Cannot query %s for %s: %s' % (ds_path,
attribute,
stderr))
plist = NSString.stringWithString_(stdout).propertyList()
if attribute:
value = None
if 'dsAttrTypeStandard:%s' % attribute in plist:
value = plist['dsAttrTypeStandard:%s' % attribute]
elif attribute in plist:
value = plist[attribute]
try:
# We're copying to a new list to convert from NSCFArray
return value[:]
except TypeError:
# ... unless we can't
return value
else:
return plist | 2dea68b5897a46c90d2f8cf24e42519c272e70f1 | 3,659,533 |
def calculate_offset(lon, first_element_value):
"""
Calculate the number of elements to roll the dataset by in order to have
longitude from within requested bounds.
:param lon: longitude coordinate of xarray dataset.
:param first_element_value: the value of the first element of the longitude array to roll to.
"""
# get resolution of data
res = lon.values[1] - lon.values[0]
# calculate how many degrees to move by to have lon[0] of rolled subset as lower bound of request
diff = lon.values[0] - first_element_value
# work out how many elements to roll by to roll data by 1 degree
index = 1 / res
# calculate the corresponding offset needed to change data by diff
offset = int(round(diff * index))
return offset | a55eee1dd11b1b052d67ab1abadfc8087c1a2fe0 | 3,659,534 |
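# Worked example: with a 1-degree grid starting at 0 and a requested lower bound of
# -180, the data needs to roll by 180 elements (xarray is assumed, as in the docstring).
import numpy as np
import xarray as xr

lon = xr.DataArray(np.arange(0.0, 360.0, 1.0), dims='lon')
print(calculate_offset(lon, first_element_value=-180))  # res=1, diff=180 -> offset=180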
def min_mean_col(m: ma.MaskedArray) -> int:
"""Calculate the index of the column with the smallest mean.
"""
if ma.count_masked(m) == m.size:
return -1
col_mean = np.nanmean(m, axis=0)
return np.argmin(col_mean) | 499b7d5db38edc222aac6517d87d9df30285cb37 | 3,659,535 |
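# Quick illustrative check: the column means of the array below are [2.0, 6.0], so the
# function returns index 0 (np/ma are the usual numpy imports assumed by the snippet).
import numpy as np
import numpy.ma as ma

m = ma.masked_array([[1.0, 5.0], [3.0, 7.0]])
print(min_mean_col(m))  # -> 0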
import resnet
import sys
import os
import torch
def load_simclrv2(init_args):
"""
Load pretrained SimCLR-v2 model.
"""
ckpt_file = init_args["ckpt_file"]
model_dir = init_args["model_dir"]
# Load the resnet.py that comes with the SimCLR-v2's PyTorch converter
sys.path.insert(
0,
os.path.join(
model_dir,
"SimCLRv2-Pytorch",
),
)
backbone, _ = resnet.get_resnet(depth=50, width_multiplier=1, sk_ratio=0)
backbone.load_state_dict(torch.load(ckpt_file, "cpu")["resnet"])
def forward(x):
# return the tensor obtained at the end of the network
# prior to global average pooling
return backbone(x, apply_fc=False)
return backbone, forward | 3d5521cf19b2e5c91af8ed4ce55a22e0573445d4 | 3,659,536 |
def minekey_read(request, mk_hmac, mk_fid, mk_fversion, mk_iid, mk_depth, mk_type, mk_ext, **kwargs):
"""
arguments: request, mk_hmac, mk_fid, mk_fversion, mk_iid, mk_depth, mk_type, mk_ext, **kwargs
implements: GET /key/(MK_HMAC)/(MK_FID)/(MK_FVERSION)/(MK_IID)/(MK_DEPTH)/(MK_TYPE).(MK_EXT)
returns: a suitable HttpResponse object
"""
if mk_type not in ('data', 'icon'):
diag = 'bad minekey method for GET'
Event.alert(request, 'minekey_read', diag=diag)
return HttpResponseNotFound(diag)
try:
mk = MineKey(request,
hmac=mk_hmac,
fid=mk_fid,
fversion=mk_fversion,
iid=mk_iid,
depth=mk_depth,
type=mk_type,
ext=mk_ext,
enforce_hmac_check=True)
except:
diag = 'bad minekey validation'
Event.alert(request, 'minekey_read', diag=diag)
if settings.DEBUG: raise
return HttpResponseNotFound(diag)
try:
Event.log(request, 'minekey_read', feed=mk.get_feed(), item=mk.get_item())
return mk.response()
except Exception as e:
Event.alert(request, 'minekey_read', diag=str(e))
raise | b80db062c71d43759602410cd75decf5eadf3a71 | 3,659,537 |
import os
def get_file_metadata(folder, video_relative_path):
""" """
# SAMPLE FILENAME: XXCam_01_20180517203949574.mp4
# XXXXX_XX_YYYYMMDDHHMMSSmmm.mp4
# 2019/01/01/XXCam-20180502-1727-34996.mp4
# XXCam-01-20180502-1727-34996.mp4
video_filename = os.path.basename(video_relative_path)
sub_folder = os.path.dirname(video_relative_path)
basename, extension = os.path.splitext(video_filename)
filename_parts_u = basename.split('_')
filename_parts_d = basename.split('-')
if len(filename_parts_u) == 3 and filename_parts_u[2].isdigit() and len(filename_parts_u[2]) == 17:
file_date = filename_parts_u[2][0:8]
file_time1 = filename_parts_u[2][8:12]
file_time2 = filename_parts_u[2][12:17]
basename_new = '%s-%s-%s-%s' % (filename_parts_u[0], file_date, file_time1, file_time2)
elif len(filename_parts_u) == 3 and filename_parts_u[2].isdigit() and len(filename_parts_u[2]) == 14:
# July2019 firmware update on Reolink camera changed filename format, therefore simplify mine!
file_date = filename_parts_u[2][0:8]
file_time1 = filename_parts_u[2][8:14]
# file_time2 = filename_parts_u[2][12:14]
basename_new = '%s-%s-%s' % (filename_parts_u[0], file_date, file_time1) # ,file_time2)
elif (len(filename_parts_d) == 4 and filename_parts_d[1].isdigit() and len(filename_parts_d[1]) == 8
and filename_parts_d[2].isdigit() and len(filename_parts_d[2]) == 4
and filename_parts_d[3].isdigit() and len(filename_parts_d[3]) == 5):
basename_new = basename
file_date = filename_parts_d[1]
elif (len(filename_parts_d) == 5 and filename_parts_d[2].isdigit() and len(filename_parts_d[2]) == 8
and filename_parts_d[3].isdigit() and len(filename_parts_d[3]) == 4
and filename_parts_d[4].isdigit() and len(filename_parts_d[4]) == 5):
basename_new = basename
file_date = filename_parts_d[2]
else:
basename_new = basename
file_date = 'NO_DATE'
return {'original': video_filename,
'sub_folder': sub_folder,
'source_fullpath': os.path.join(folder, video_relative_path),
'filename_new': '%s%s' % (basename_new, extension),
'basename_new': basename_new,
'basename_original': basename,
'file_date': file_date
} | c8fcc163f1b3fa89206d752181d5f814219fe74a | 3,659,538 |
def test_data_alignment(role_value, should_pass, check_model):
"""Test a custom model which returns a good and alignments from data().
qtmodeltest should capture this problem and fail when that happens.
"""
class MyModel(qt_api.QAbstractListModel):
def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
return 1 if parent == qt_api.QtCore.QModelIndex() else 0
def data(
self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.DisplayRole
):
if role == qt_api.QtCore.Qt.TextAlignmentRole:
return role_value
elif role == qt_api.QtCore.Qt.DisplayRole:
if index == self.index(0, 0):
return "Hello"
return None
check_model(MyModel(), should_pass=should_pass) | 4ebed3384cf5c694d72235e703b6f9594de5ff7b | 3,659,539 |
def cov(a, b):
"""Return the sample covariance of vectors a and b"""
a = flex.double(a)
b = flex.double(b)
n = len(a)
assert n == len(b)
resid_a = a - flex.mean(a)
resid_b = b - flex.mean(b)
return flex.sum(resid_a*resid_b) / (n - 1) | 94505852671e4652f96daa7b8e61f759aeca1dda | 3,659,540 |
from bs4 import BeautifulSoup
import re
def beautify(soup: BeautifulSoup, rich_terminal: bool = True) -> str:
"""
Cleans up the raw HTML so it's more presentable.
Parse BeautifulSoup HTML and return prettified string
"""
beautifiedText = str()
for i in soup:
if rich_terminal:
term = Terminal()
span_sub = r"{t.italic}\1{t.normal}".format(t=term)
strong_sub = r"{t.bold}\1{t.normal}".format(t=term)
else:
span_sub = r"\1"
strong_sub = r"\1"
i = re.sub(r'<span class="\w+">(.+)</span>', span_sub, str(i),)
i = re.sub(r"<strong>(.+)</strong>", strong_sub, str(i))
beautifiedText += " " + i
# Remove leading whitespace.
beautifiedText = re.sub(r"^\s+", "", beautifiedText)
# Compress all whitespace to a single space.
beautifiedText = re.sub(r"\s{2,}", " ", beautifiedText)
# Trim whitespace immediately preceding common punctuation.
beautifiedText = re.sub(r"\s+([,\)\].;:])", r"\g<1>", beautifiedText)
# Trim whitespace immediately following common punctuation.
beautifiedText = re.sub(r"([\(])\s+", r"\g<1>", beautifiedText)
return beautifiedText | df79666ad0ec9592e1a24325813b59e2d9711636 | 3,659,541 |
def design_complexity(design: Design) -> int:
"""Returns an approximation of the design's complexity to create."""
diversity = 3 * len(design.required)
abundance = 2 * sum(design.required.values())
return diversity + abundance + design.additional | b5be6336ce037d010bbb274dc6ce5538ac6ecae8 | 3,659,542 |
import torch
def approx_q_y(q_z, mu_lookup, logvar_lookup, k=10):
"""
refer to eq.13 in the paper
"""
q_z_shape = list(q_z.size()) # (b, z_dim)
mu_lookup_shape = [mu_lookup.num_embeddings, mu_lookup.embedding_dim] # (k, z_dim)
logvar_lookup_shape = [logvar_lookup.num_embeddings, logvar_lookup.embedding_dim] # (k, z_dim)
if not mu_lookup_shape[0] == k:
raise ValueError("mu_lookup_shape (%s) does not match the given k (%s)" % (
mu_lookup_shape, k))
if not logvar_lookup_shape[0] == k:
raise ValueError("logvar_lookup_shape (%s) does not match the given k (%s)" % (
logvar_lookup_shape, k))
if not q_z_shape[1] == mu_lookup_shape[1]:
raise ValueError("q_z_shape (%s) does not match mu_lookup_shape (%s) in dimension of z" % (
q_z_shape, mu_lookup_shape))
if not q_z_shape[1] == logvar_lookup_shape[1]:
raise ValueError("q_z_shape (%s) does not match logvar_lookup_shape (%s) in dimension of z" % (
q_z_shape, logvar_lookup_shape))
# TODO: vectorization and don't use for loop
batch_size = q_z_shape[0]
log_q_y_logit = torch.zeros(batch_size, k).type(q_z.type())
for k_i in torch.arange(0, k):
mu_k, logvar_k = mu_lookup(k_i), logvar_lookup(k_i)
log_q_y_logit[:, k_i] = log_gauss(q_z, mu_k, logvar_k) + np.log(1 / k)
q_y = torch.nn.functional.softmax(log_q_y_logit, dim=1)
return log_q_y_logit, q_y | 4bc9e18f7c260e11f4085c7deea993e375b87124 | 3,659,543 |
def svn_repos_get_logs2(*args):
"""
svn_repos_get_logs2(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
svn_revnum_t end, svn_boolean_t discover_changed_paths,
svn_boolean_t strict_node_history,
svn_repos_authz_func_t authz_read_func,
svn_log_message_receiver_t receiver,
apr_pool_t pool) -> svn_error_t
"""
    return _repos.svn_repos_get_logs2(*args) | 8580b4df10d6a6ebecbea960e14cd2ca79720beb | 3,659,544 |
def error_function(theta, X, y):
"""Error function J definition"""
diff = np.dot(X, theta) - y
    return (1. / (2 * m)) * np.dot(np.transpose(diff), diff) | 0762651fe9f33107a4e198e70319f77a5ae50e0b | 3,659,545 |
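# Worked check of the least-squares cost J(theta) = 1/(2m) * sum((X.theta - y)^2). Note
# that `m` is read from an enclosing scope in the snippet above, so it is set explicitly
# here; the data values are hypothetical.
import numpy as np

X = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
y = np.array([[1.0], [2.0], [3.0]])
m = X.shape[0]
theta = np.zeros((2, 1))
print(error_function(theta, X, y))  # residuals are [-1,-2,-3], so J = (1+4+9)/(2*3) ≈ 2.33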
def register(operation_name):
"""
Registers the decorated class as an Operation with the supplied operation name
:param operation_name: The identifying name for the Operation
"""
def wrapper(clazz):
if operation_name not in OPERATIONS:
OPERATIONS[operation_name] = clazz
return clazz
return wrapper | 036612a64d987ede2546bd5dbc9d848e6ed6c48b | 3,659,546 |
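# Illustrative use of the register decorator (OPERATIONS is the module-level registry
# referenced by the snippet; the operation class below is hypothetical).
@register('copy_object')
class CopyObjectOperation:
    def run(self, source, destination):
        # ... perform the copy ...
        return destination

print(OPERATIONS['copy_object'])  # -> <class '...CopyObjectOperation'>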
def url_exists(video):
"""
check each source for a url for this video; return True as soon as one is found. If none are found, return False
"""
max_timeout = int(kodi.get_setting('source_timeout'))
logger.log('Checking for Url Existence: |%s|' % (video), log_utils.LOGDEBUG)
for cls in relevant_scrapers(video.video_type):
if kodi.get_setting('%s-sub_check' % (cls.get_name())) == 'true':
scraper_instance = cls(max_timeout)
url = scraper_instance.get_url(video)
if url:
logger.log('Found url for |%s| @ %s: %s' % (video, cls.get_name(), url), log_utils.LOGDEBUG)
return True
logger.log('No url found for: |%s|' % (video), log_utils.LOGDEBUG)
return False | ee3c507e824255031e90eac92fbc0e3cc54ca788 | 3,659,547 |
import os
def test_actions_explicit_get_collector_action_for_unexisting_terminal():
"""
Test for situation when `get_collector` has an action for un-existing
terminal.
"""
action = get_collector()
@action
def INT(context, value):
return int(value)
@action
def STRING(context, value):
return "#{}#".format(value)
@action
def STRING2(context, value):
return "#{}#".format(value)
grammar = Grammar.from_file(os.path.join(THIS_FOLDER, 'grammar.pg'))
Parser(grammar, actions=action.all) | 896e4cf91f2c6ce8949974e501c06ac3f9a8745f | 3,659,548 |
def log2_grad(orig, grad):
"""Returns [grad * 1 / (log(2) * x)]"""
x = orig.args[0]
ones = ones_like(x)
two = const(2.0, dtype=x.checked_type.dtype)
return [grad * ones / (log(two) * x)] | c17d7eeee43e64e0eeb7feb86f357374cc2516e4 | 3,659,549 |
import posixpath
import requests
def _stream_annotation(file_name, pn_dir):
"""
Stream an entire remote annotation file from Physionet.
Parameters
----------
file_name : str
The name of the annotation file to be read.
pn_dir : str
The PhysioNet directory where the annotation file is located.
Returns
-------
ann_data : ndarray
The resulting data stream in numpy array format.
"""
# Full url of annotation file
url = posixpath.join(config.db_index_url, pn_dir, file_name)
# Get the content
response = requests.get(url)
# Raise HTTPError if invalid url
response.raise_for_status()
# Convert to numpy array
ann_data = np.fromstring(response.content, dtype=np.dtype('<u1'))
return ann_data | 8a1168562b87f27cc035d21b7f91cb23a611d0b4 | 3,659,550 |
def get_scenarios():
"""
Return a list scenarios and values for parameters in each of them
:return:
"""
# Recover InteractiveSession
isess = deserialize_isession_and_prepare_db_session()
if isess and isinstance(isess, Response):
return isess
scenarios = get_scenarios_in_state(isess.state)
return build_json_response(scenarios, 200) | 2812cab14347f03e208d5c3b6399ffa954dfe843 | 3,659,551 |
def download() -> str:
""" Returns a download of the active files.
:return: the zip files needs to be downloaded.
"""
file_manager = utility.load_file_manager()
response = make_response(file_manager.zip_active_files(
"scrubbed_documents.zip"))
# Disable download caching
response.headers["Cache-Control"] = \
"max-age=0, no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response | a507c6fd2281226a0db4f4e9e84d2c4f0e6e9562 | 3,659,552 |
import subprocess
def evaluate_test_arff(model_path, test_arff_path, out_path):
"""
    Obtain predictions for the instances in test_arff_path using the trained model in model_path
    :param model_path: path to the trained Weka model
    :param test_arff_path: path to the test set in ARFF format
    :param out_path: path where the predictions file will be written
"""
# PREDICTIONS FILE HEADERS: INSTANCE, ACTUAL, PREDICTED, ERROR
bash_file_path = "../../data/bash_scripts/explorer_test_model.sh "
with open(out_path, 'w') as fi:
fi.close()
command = "".join([bash_file_path, test_arff_path, " ", model_path, " ", out_path])
print(command)
subprocess.call(command, shell=True)
remove_lines(out_path) # remove headers of prediction file
df_participant = pd.read_csv(out_path, header=0, sep=",")
return df_participant | 46f311efe952e63eceb8c93f56c8d53487ed13ae | 3,659,553 |
import pickle
def load_from_pickle_file(filepath):
""" Loads a pickle file into a python variable """
with open(filepath, "rb") as f:
python_obj = pickle.load(f)
return python_obj | 8ad8b947e762590d1be8d6b3ca4b519293692f09 | 3,659,554 |
from aiida.orm import Dict
from aiida_quantumespresso.utils.resources import get_default_options
def generate_inputs_pw(fixture_code, generate_structure, generate_kpoints_mesh, generate_upf_data):
"""Generate default inputs for a `PwCalculation."""
def _generate_inputs_pw():
"""Generate default inputs for a `PwCalculation."""
inputs = {
'code': fixture_code('quantumespresso.pw'),
'structure': generate_structure(),
'kpoints': generate_kpoints_mesh(2),
'parameters': Dict(dict={
'CONTROL': {
'calculation': 'scf'
},
'SYSTEM': {
'ecutrho': 240.0,
'ecutwfc': 30.0
}
}),
'pseudos': {
'Si': generate_upf_data('Si')
},
'metadata': {
'options': get_default_options()
}
}
return inputs
return _generate_inputs_pw | fbe0a332a011b1909380275b1f70444b2dfb5d17 | 3,659,555 |
def connected_components(edge_index, num_nodes=None):
"""Find the connected components of a given graph.
Args:
edge_index (LongTensor): Edge coordinate matrix.
num_nodes (int, optional): Number of nodes. Defaults to None.
Returns:
LongTensor: Vector assigning each node to its component index.
"""
if num_nodes is None:
num_nodes = edge_index.max().item() + 1
device = edge_index.device
row, col = edge_index.cpu()
out = cc_cpu.connected_components(row, col, num_nodes)
return out.to(device) | cb0b620fbd5577375b2ac79d62194537e61a1204 | 3,659,556 |
import tqdm
def assembly2graph(path=DATA_PATH):
"""Convert assemblies (assembly.json) to graph format"""
"""Return a list of NetworkX graphs"""
graphs = []
input_files = get_input_files(path)
for input_file in tqdm(input_files, desc="Generating Graphs"):
ag = AssemblyGraph(input_file)
graph = ag.get_graph_networkx()
graphs.append(graph)
return graphs, input_files | 46f2ed913c5047d68ee523f1a23382c2036fc1d7 | 3,659,557 |
def _get_default_backing(backing):
"""
_get_default_backing(backing)
Returns the prefered backing store
- if user provides a valid Backing object, use it
- if there is a default_backing object instantiated, use it
- if the user provided a configuration dict, use it to create
a new default_backing object
- otherwise, create a default_backing object using our defaults.
"""
# Probably they didn't mean to do this...
global default_backing, default_backing_config
if isinstance(backing, Backing):
return backing
if default_backing:
return default_backing
elif type(backing) is dict:
default_backing = Backing(**backing)
else:
# create a new default backing
default_backing = Backing(**default_backing_config)
return default_backing | 60f878ee730bea33b93b88e88dfe29885f6fac85 | 3,659,558 |
def slice(request, response, start, end=None):
"""Send a byte range of the response body
:param start: The starting offset. Follows python semantics including
negative numbers.
:param end: The ending offset, again with python semantics and None
(spelled "null" in a query string) to indicate the end of
the file.
"""
content = resolve_content(response)
response.content = content[start:end]
return response | 450e43afb988736dee991bcf284d5b92e11aec74 | 3,659,559 |
import warnings
def get_ps(sdfits, scan, ifnum=0, intnum=None, plnum=0, fdnum=0, method='vector', avgf_min=256):
"""
Parameters
----------
sdfits :
scan : int
Scan number.
plnum : int
Polarization number.
method : {'vector', 'classic'}, optional
Method used to compute the source temperature.
If set to ``'vector'`` it will use Eq. (16) of
Winkel et al. (2012). If set to ``'classic'`` it
will use the same method as GBTIDL.
The default is ``'vector'``.
Returns
-------
"""
ps_scan = sdfits.get_scans(scan, ifnum=ifnum, intnum=intnum, plnum=plnum)
rows = ps_scan.table
obsmode = rows["OBSMODE"]
last_on = rows["LASTON"]
last_off = rows["LASTOFF"]
procnum = rows["PROCSEQN"]
source = np.unique(rows['OBJECT'])[0]
tcal = np.average(rows['TCAL'], axis=0)
procname, swstate, swtchsig = obsmode[0].split(':')
if procname not in ["OffOn", "OnOff"]:
warnings.warn(f"Selected scan is not OnOff or OffOn, it is: {procname}."
f"Cannot get Tcal from this scan.")
return None
scan_on, scan_off = utils.get_ps_scan_pair(scan, procnum, procname)
sou_on = sdfits.get_scans(scan_on, sig="T", cal="T", ifnum=ifnum, intnum=intnum, plnum=plnum)
sou_off = sdfits.get_scans(scan_on, sig="T", cal="F", ifnum=ifnum, intnum=intnum, plnum=plnum)
off_on = sdfits.get_scans(scan_off, sig="T", cal="T", ifnum=ifnum, intnum=intnum, plnum=plnum)
off_off = sdfits.get_scans(scan_off, sig="T", cal="F", ifnum=ifnum, intnum=intnum, plnum=plnum)
if method == 'vector':
sou_on.average()
sou_off.average()
off_on.average()
off_off.average()
off_freq = off_off.freq
sou_freq = sou_on.freq
nchan = off_on.data.shape[0]
facs = utils.factors(nchan)
avgf = np.min(facs[facs >= avgf_min])
kappa_off = get_kappa(off_on.data, off_off.data, avgf=avgf)
kappa_freq = off_freq.reshape(nchan//avgf, avgf).mean(axis=1)
# Interpolate back to high frequency resolution.
pt = np.argsort(kappa_freq)
pi = np.argsort(sou_freq)
kappa_interp = np.interp(sou_freq.to('Hz').value[pi], kappa_freq.to('Hz').value[pt], kappa_off)
# Compute the source temperature (Eq. (16) in Winkel et al. 2012).
tsou_on = (kappa_interp + 1.)*tcal*(sou_on.data - off_on.data)/off_on.data
tsou_off = kappa_interp*tcal*(sou_off.data - off_off.data)/off_off.data
# Average.
tsou = 0.5*(tsou_on + tsou_off)
elif method == 'gbtidl':
# Eqs. (1) and (2) from Braatz (2009, GBTIDL calibration guide)
# https://www.gb.nrao.edu/GBT/DA/gbtidl/gbtidl_calibration.pdf
tsys = gbtidl_tsys(off_on.data, off_off.data, tcal)
sig = 0.5*(sou_on.data + sou_off.data)
ref = 0.5*(off_on.data + off_off.data)
ta = gbtidl_sigref2ta(sig, ref, tsys)
tint_sou = 0.5*(sou_on.table["EXPOSURE"] + sou_off.table["EXPOSURE"])
tint_off = 0.5*(off_on.table["EXPOSURE"] + off_off.table["EXPOSURE"])
tint = 0.5*(tint_sou + tint_off)
dnu = np.mean(sou_on.table["CDELT1"])
tsou = np.average(ta, axis=0, weights=dnu*tint*np.power(tsys, -2.))
elif method == 'classic':
tsys = classic_tsys(off_on.data, off_off.data, tcal)
ta_on = (sou_on.data - off_on.data)/off_on.data*(tsys[:,np.newaxis] + tcal)
ta_off = (sou_off.data - off_off.data)/off_off.data*(tsys[:,np.newaxis])
tint_sou = 0.5*(sou_on.table["EXPOSURE"] + sou_off.table["EXPOSURE"])
tint_off = 0.5*(off_on.table["EXPOSURE"] + off_off.table["EXPOSURE"])
tint = 0.5*(tint_sou + tint_off)
dnu = np.mean(sou_on.table["CDELT1"])
ta_on = np.average(ta_on, axis=0, weights=dnu*tint_sou*np.power(tsys, -2.))
ta_off = np.average(ta_off, axis=0, weights=dnu*tint_off*np.power(tsys, -2.))
tsou = 0.5*(ta_on + ta_off)
return tsou | 8226f4e38ab7fce3c1c7f66c88b275e7c7798d86 | 3,659,560 |
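# Illustrative usage sketch: `sdfits` is assumed to be an already-opened SDFITS object
# exposing get_scans(), and scan 51 is a hypothetical OnOff/OffOn position-switched scan.
tsou_vector = get_ps(sdfits, scan=51, ifnum=0, plnum=0, method='vector')
tsou_gbtidl = get_ps(sdfits, scan=51, ifnum=0, plnum=0, method='gbtidl')
# Both calls return the calibrated source temperature spectrum; 'vector' follows
# Eq. (16) of Winkel et al. (2012), 'gbtidl' mirrors the GBTIDL sig/ref steps.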
import click
def choiceprompt(variable: Variable) -> Binding:
"""Prompt to choose from several values for the given name."""
if not variable.choices:
raise ValueError("variable with empty choices")
choices = {str(number): value for number, value in enumerate(variable.choices, 1)}
lines = [
f"Select {variable.name}:",
*[f"{number} - {value}" for number, value in choices.items()],
"Choose from {}".format(", ".join(choices.keys())),
]
choice = click.prompt(
"\n".join(lines),
type=click.Choice(list(choices)),
default="1",
show_choices=False,
)
return bind(variable, choices[choice]) | 78700032f93abaca2227d653d5f199e6dbf3b4ba | 3,659,561 |
import urllib
import certifi
import urllib3
import http
import logging
def GetCLInfo(review_host, change_id, auth_cookie='', include_messages=False,
include_detailed_accounts=False):
"""Get the info of the specified CL by querying the Gerrit API.
Args:
review_host: Base URL to the API endpoint.
change_id: Identity of the CL to query.
auth_cookie: Auth cookie if the API is not public.
include_messages: Whether to pull and return the CL messages.
include_detailed_accounts: Whether to pull and return the email of users
in CL messages.
Returns:
An instance of `CLInfo`. Optional fields might be `None`.
Raises:
GitUtilException if error occurs while querying the Gerrit API.
"""
url = f'{review_host}/changes/{change_id}'
params = []
if include_messages:
params.append(('o', 'MESSAGES'))
if include_detailed_accounts:
params.append(('o', 'DETAILED_ACCOUNTS'))
if params:
url = url + '?' + urllib.parse.urlencode(params)
  pool_manager = urllib3.PoolManager(ca_certs=certifi.where())
pool_manager.headers['Cookie'] = auth_cookie
pool_manager.headers['Content-Type'] = 'application/json'
pool_manager.headers['Connection'] = 'close'
try:
r = pool_manager.urlopen('GET', url)
except urllib3.exceptions.HTTPError:
raise GitUtilException(f'invalid url {url}')
if r.status != http.client.OK:
    raise GitUtilException(f'request failed with status code {r.status}')
try:
# the response starts with a magic prefix line for preventing XSSI which
# should be stripped.
stripped_json = r.data.split(b'\n', 1)[1]
json_data = json_utils.LoadStr(stripped_json)
except Exception:
raise GitUtilException('Response format Error: %r' % (r.data, ))
def _ConvertGerritCLMessage(json_data):
return CLMessage(
json_data['message'],
json_data['author']['email'] if include_detailed_accounts else None)
try:
return CLInfo(json_data['change_id'], json_data['_number'],
_GERRIT_CL_STATUS_TO_CL_STATUS[json_data['status']],
[_ConvertGerritCLMessage(x) for x in json_data['messages']]
if include_messages else None)
except Exception as ex:
logging.debug('Unexpected Gerrit API response for CL info: %r', json_data)
raise GitUtilException('failed to parse the Gerrit API response') from ex | b43f5f47ca957aefaaf0764b61318d87c4a614ec | 3,659,562 |
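# Illustrative usage sketch; the review host and change id below are hypothetical and
# the call needs network access (plus an auth cookie for non-public hosts).
cl_info = GetCLInfo('https://chromium-review.googlesource.com', '123456',
                    include_messages=True, include_detailed_accounts=True)
# cl_info carries the change id, CL number, mapped status and (optionally) the CL
# messages, in the order they are passed to the CLInfo constructor above.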
import base64
import hashlib
def rehash(file_path):
"""Return (hash, size) for a file with path file_path. The hash and size
are used by pip to verify the integrity of the contents of a wheel."""
with open(file_path, 'rb') as file:
contents = file.read()
hash = base64.urlsafe_b64encode(hashlib.sha256(contents).digest()).decode('latin1').rstrip('=')
size = len(contents)
return hash, size | 167449640e8cbf17d36e7221df3490a12381dd8e | 3,659,563 |
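# Illustrative usage sketch (hypothetical path): the returned pair is what pip expects
# in a wheel's RECORD file, e.g. "<name>,sha256=<hash>,<size>".
digest, nbytes = rehash('example_pkg/__init__.py')
record_line = 'example_pkg/__init__.py,sha256={},{}'.format(digest, nbytes)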
import requests
def _magpie_update_services_conflict(conflict_services, services_dict, request_cookies):
# type: (List[Str], ServicesSettings, AnyCookiesType) -> Dict[Str, int]
"""
Resolve conflicting services by name during registration by updating them only if pointing to different URL.
"""
magpie_url = get_magpie_url()
statuses = dict()
for svc_name in conflict_services:
statuses[svc_name] = 409
svc_url_new = services_dict[svc_name]["url"]
svc_url_db = "{magpie}/services/{svc}".format(magpie=magpie_url, svc=svc_name)
svc_resp = requests.get(svc_url_db, cookies=request_cookies)
svc_info = get_json(svc_resp).get(svc_name)
svc_url_old = svc_info["service_url"]
if svc_url_old != svc_url_new:
svc_info["service_url"] = svc_url_new
res_svc_put = requests.patch(svc_url_db, data=svc_info, cookies=request_cookies)
statuses[svc_name] = res_svc_put.status_code
print_log("[{url_old}] => [{url_new}] Service URL update ({svc}): {resp}"
.format(svc=svc_name, url_old=svc_url_old, url_new=svc_url_new, resp=res_svc_put.status_code),
logger=LOGGER)
return statuses | 71f72680aebd1fd781c5cbd9e77eeba06a64062e | 3,659,564 |
def rtc_runner(rtc):
"""Resolved tool contract runner."""
return run_main(polish_chunks_pickle_file=rtc.task.input_files[0],
sentinel_file=rtc.task.input_files[1],
subreads_file=rtc.task.input_files[2],
output_json_file=rtc.task.output_files[0],
max_nchunks=rtc.task.max_nchunks) | c9d5a1c23e5b88c6d7592dde473dc41123616a77 | 3,659,565 |
from typing import Any
import json
def dict_to_json_str(o: Any) -> str:
"""
Converts a python object into json.
"""
json_str = json.dumps(o, cls=EnhancedJSONEncoder, sort_keys=True)
return json_str | 8377cee2e25d5daeefd7a349ede02f7134e052b2 | 3,659,566 |
def paramid_to_paramname(paramid):
"""Turn a parameter id number into a parameter name"""
try:
return param_info[paramid]['n']
except KeyError:
return "UNKNOWN_%s" % str(hex(paramid)) | c5e47c6754448a20d79c33b6b501039b1463108e | 3,659,567 |
def max_dist_comp(G, cc0, cc1):
""" Maximum distance between components
Parameters
----------
G : nx.graph
Graph
cc0 : list
Component 0
cc1 : list
        Component 1
Returns
-------
threshold : float
Maximum distance
"""
# Assertions
assert isinstance(G, nx.Graph), "G is not a NetworkX graph"
# Calculation
threshold = 0
for n0 in cc0:
for n1 in cc1:
distance = metrics.distance_between_nodes(G, n0, n1)
if distance > threshold:
threshold = distance
return threshold | 5df633ee746d537462b84674dc5adadd6f6f7e53 | 3,659,568 |
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
)
return features | 02e4a731c818e0833152aa8e44d6a49e523ef1fb | 3,659,569 |
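# Illustrative usage sketch: InputExample (with .guid/.words/.labels) is assumed to be the
# NER example class used alongside this function; the tokenizer comes from `transformers`
# and requires a model download on first use.
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
label_list = ["O", "B-PER", "I-PER"]
examples = [InputExample(guid="dev-1", words=["Hugh", "Laurie"], labels=["B-PER", "I-PER"])]
features = convert_examples_to_features(
    examples, label_list, max_seq_length=32, tokenizer=tokenizer,
    cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token,
    pad_token=tokenizer.pad_token_id)
# features[0].input_ids is padded to length 32: [CLS] + word pieces + [SEP] + padding,
# with pad_token_label_id on every sub-token except the first one of each word.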
def exp(var):
"""
Returns variable representing exp applied to the input variable var
"""
result = Var(np.exp(var.val))
result.parents[var] = var.children[result] = np.exp(var.val)
return result | d2811bbb240da33ce158a8bff5739ace2275da0e | 3,659,570 |
from typing import List
from typing import Any
from typing import Callable
def invalidate_cache(
key: str = None,
keys: List = [],
obj: Any = None,
obj_attr: str = None,
namespace: str = None,
):
"""Invalidates a specific cache key"""
if not namespace:
namespace = HTTPCache.namespace
if key:
keys = [key]
def wrapper(func: Callable):
@wraps(func)
async def inner(*args, **kwargs):
try:
                # pull the object passed via the `obj` keyword; its `obj_attr` (e.g. `id`) is used to build the cache keys
_obj = kwargs.get(f"{obj}", None)
_keys = await HTTPKeys.generate_keys(
keys=keys, config=HTTPCache, obj=_obj, obj_attr=obj_attr
)
_cache = HTTPCacheBackend(
redis=HTTPCache.redis_client, namespace=namespace
)
await _cache.invalidate_all(keys=_keys)
_computed_response = await func(*args, **kwargs)
return _computed_response
except Exception as e:
log_error(msg=f"Cache Error: {e}", e=e, method="cache")
return await func(*args, **kwargs)
return inner
return wrapper | 1b200db81db9d1134bbbbb8a09d3b57f5c6623dc | 3,659,571 |
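# Illustrative usage sketch: the decorator only needs the wrapped coroutine to receive the
# object via the keyword named by `obj`; HTTPCache is assumed to be configured elsewhere,
# and the exact key-template handling follows HTTPKeys.generate_keys.
@invalidate_cache(keys=["user_detail", "user_list"], obj="user", obj_attr="id")
async def update_user(*, user):
    ...  # persist the change; the listed cache keys are invalidated before returning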
def read_dataset(filename):
"""Reads in the TD events contained in the N-MNIST/N-CALTECH101 dataset file specified by 'filename'"""
    # N-MNIST: 34×34 pixels
f = open(filename, 'rb')
raw_data = np.fromfile(f, dtype=np.uint8)
f.close()
raw_data = np.uint32(raw_data)
all_y = raw_data[1::5]
all_x = raw_data[0::5]
all_p = (raw_data[2::5] & 128) >> 7 #bit 7
all_ts = ((raw_data[2::5] & 127) << 16) | (raw_data[3::5] << 8) | (raw_data[4::5])
#Process time stamp overflow events
time_increment = 2 ** 13
overflow_indices = np.where(all_y == 240)[0]
for overflow_index in overflow_indices:
all_ts[overflow_index:] += time_increment
#Everything else is a proper td spike
td_indices = np.where(all_y != 240)[0]
events = np.stack([all_x[td_indices], all_y[td_indices], all_ts[td_indices], all_p[td_indices]], axis=1).astype(np.float32)
# events[:,3] = 2*events[:,3]-1
return events | 74861968e36d3e357ce62f615456aa02eb3bd28b | 3,659,572 |
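# Illustrative decoding of one 5-byte N-MNIST event, matching the bit layout used above:
# byte 0 = x, byte 1 = y, byte 2 bit 7 = polarity, byte 2 bits 0-6 plus bytes 3-4 = 23-bit
# timestamp (microseconds in N-MNIST).
import numpy as np
raw_event = np.array([3, 7, 0b10000001, 0x00, 0x2A], dtype=np.uint32)
x, y = raw_event[0], raw_event[1]
p = (raw_event[2] & 128) >> 7
ts = ((raw_event[2] & 127) << 16) | (raw_event[3] << 8) | raw_event[4]
assert (x, y, p, ts) == (3, 7, 1, (1 << 16) | 42)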
def prge_annotation():
"""Returns an annotation with protein/gene entities (PRGE) identified.
"""
annotation = {"ents": [{"text": "p53", "label": "PRGE", "start": 0, "end": 0},
{"text": "MK2", "label": "PRGE", "start": 0, "end": 0}],
"text": "p53 and MK2",
"title": ""}
return annotation | dda417c1c1a1146482f4a3340741d938714dbf30 | 3,659,573 |
from scipy.spatial import Delaunay
def inner_points_mask(points):
"""Mask array into `points` where ``points[msk]`` are all "inner" points,
i.e. `points` with one level of edge points removed. For 1D, this is simply
points[1:-1,:] (assuming ordered points). For ND, we calculate and remove
the convex hull.
Parameters
----------
points : nd array (npoints, ndim)
Returns
-------
msk : (npoints, ndim)
Bool array.
"""
msk = np.ones((points.shape[0],), dtype=bool)
if points.shape[1] == 1:
assert (np.diff(points[:,0]) >= 0.0).all(), ("points not monotonic")
msk[0] = False
msk[-1] = False
else:
tri = Delaunay(points)
edge_idx = np.unique(tri.convex_hull)
msk.put(edge_idx, False)
return msk | d81788fdbe3f19f67719951b319bcdd5e01d4d60 | 3,659,574 |
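# Illustrative usage sketch (requires numpy and, for the 2D case, scipy):
import numpy as np
pts_1d = np.linspace(0., 1., 5).reshape(-1, 1)
assert inner_points_mask(pts_1d).sum() == 3          # the two end points are removed
xx, yy = np.meshgrid(np.arange(3.), np.arange(3.))
pts_2d = np.column_stack([xx.ravel(), yy.ravel()])
print(inner_points_mask(pts_2d).sum())               # expected: 1 (only the centre point is "inner")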
from typing import Union
import torch
from typing import Tuple
from typing import List
def array2list(X_train: Union[np.ndarray, torch.Tensor],
y_train: Union[np.ndarray, torch.Tensor],
X_test: Union[np.ndarray, torch.Tensor],
y_test: Union[np.ndarray, torch.Tensor],
batch_size: int, memory_alloc: float = 4
) -> Union[Tuple[List[np.ndarray]], Tuple[List[torch.Tensor]]]:
"""
Splits train and test numpy arrays or torch tensors into lists of
arrays/tensors of a specified size. The remainders are not included.
"""
all_data = [X_train, y_train, X_test, y_test]
arrsize = sum([get_array_memsize(x) for x in all_data])
store_on_cpu = (arrsize / 1e9) > memory_alloc
X_train = array2list_(X_train, batch_size, store_on_cpu)
y_train = array2list_(y_train, batch_size, store_on_cpu)
X_test = array2list_(X_test, batch_size, store_on_cpu)
y_test = array2list_(y_test, batch_size, store_on_cpu)
return X_train, y_train, X_test, y_test | 8b4703b9296a786d24ae97edf142fc917d9a3b07 | 3,659,575 |
import json
import requests
from django.http import HttpResponse, HttpResponseRedirect
def livecoding_redirect_view(request):
"""
livecoding oath2 fetch access token after permission dialog
"""
code = request.GET.get('code')
if code is None:
return HttpResponse("code param is empty/not found")
try:
url = "https://www.livecoding.tv/o/token/"
data = dict(code=code, grant_type='authorization_code', redirect_uri=LIVECODING_REDIRECT_URI,
client_id=LIVECODING_KEY, client_secret=LIVECODING_SECRET)
response = requests.post(url, data=data)
    except requests.exceptions.RequestException as e:
print(e)
return HttpResponse("Failed to make POST request to fetch token")
res = json.loads(response.content)
    print(res)
access_token = res['access_token']
print(access_token)
user = User.objects.get(username='admin')
    print(user)
a, created = AccessToken.objects.get_or_create(user=user)
    print(a, created)
a.access_token = access_token
a.save()
print(a)
redirect = request.GET.get('redirect')
if redirect is None:
return HttpResponse(response.content)
else:
return HttpResponseRedirect(redirect) | acf06a996da8b70d982fa670080b48faeb452f60 | 3,659,576 |
from typing import OrderedDict
def sort_dict(value):
"""Sort a dictionary."""
return OrderedDict((key, value[key]) for key in sorted(value)) | 93e03b64d44ab79e8841ba3ee7a3546c1e38d6e4 | 3,659,577 |
def hyb_stor_capacity_rule(mod, prj, prd):
"""
Power capacity of a hybrid project's storage component.
"""
return 0 | 86ed72e48738df66fca945ff8aaf976f0a7d14e0 | 3,659,578 |
def gilr_layer_cpu(X, hidden_size, nonlin=tf.nn.elu,
name='gilr'):
"""
g_t = sigmoid(Ux_t + b)
h_t = g_t h_{t-1} + (1-g_t) f(Vx_t + c)
"""
with vscope(name):
n_dims = X.get_shape()[-1].value
act = fc_layer(X, 2 * hidden_size, nonlin=tf.identity)
gate, impulse = tf.split(act, 2, len(act.shape) - 1)
gate = tf.sigmoid(gate)
impulse = nonlin(impulse)
return s_linear_recurrence_cpu(gate, (1-gate) * impulse) | fce2d10be0b8ccb5923d1781795d08b562d602bf | 3,659,579 |
from typing import Callable
from typing import Awaitable
from typing import TypeVar
A = TypeVar("A")
from typing import Tuple
import asyncio
async def retry(f: Callable[..., Awaitable[A]], schedule: Schedule[Exception, Tuple[OpinionT, float]]):
"""
Run an awaitable computation,
retrying on failures according to a schedule.
"""
while True:
try:
result = await f()
except Exception as ex:
try:
opinion, delay = schedule.update(ex)
except ScheduleConcluded:
raise ex from None
else:
await asyncio.sleep(delay)
                # TODO: surface (ex, opinion) to the caller, e.g. via a callback or logging
else:
return result | d59367da8ae63a4932948b55c7ffcdcc5c8b3c2d | 3,659,580 |
import torch
import os
def dual_solve_u(v, s, alpha, eps, verbose=False, n_iters=100, gtol=0):
"""
min_{u>=0} max_pi L(pi, u, v)
= E_xy [ u(x)alpha(x) + v(y)beta(y) + Softplus(1/eps)(s-u-v) ],
where u = min{u>=0 : E_y[pi(x,y)] <= alpha(x)}
find exact u s.t. E_y[pi(x,y)] == alpha(x)
"""
alpha = torch.as_tensor(alpha, device=s.device).clip(0, 1)
eps = torch.as_tensor(eps, device=s.device)
z = alpha.log() - (1 - alpha).log()
if alpha.amax() <= 0 or alpha.amin() >= 1: # z = +-infinity
u = -z * torch.ones_like(s[:, 0])
return u, 0
v_inp = torch.as_tensor(v, device=s.device).reshape((1, -1))
if 'CVX_STABLE' in os.environ and int(os.environ['CVX_STABLE']):
v = v_inp
else:
s = s_u_v(s, None, v)
v = None
u_min = s_u_v(s, None, v).amin(1) - z * eps - 1e-3
u_max = s_u_v(s, None, v).amax(1) - z * eps + 1e-3
u_guess = [ # avoids large negative prior_score when s>=0 if most valid cases
torch.zeros_like(u_min) + (0 - v_inp).amin() - z * eps - 1e-3,
]
# u_guess.extend(
# s_u_v(s, None, v).topk(
# (alpha * s.shape[1] + 1).clip(None, s.shape[1]).int()
# ).values[:, -3:].T
# )
assert (grad_u(u_min, v, s, alpha, eps) <= 0).all()
assert (grad_u(u_max, v, s, alpha, eps) >= 0).all()
for i in range(n_iters):
if i < len(u_guess):
u = u_guess[i]
else:
u = (u_min + u_max) / 2
g = grad_u(u, v, s, alpha, eps)
assert not u.isnan().any()
if g.abs().max() < gtol:
break
u_min = torch.where(g < 0, u, u_min)
u_max = torch.where(g > 0, u, u_max)
return u, (i + 1) | 867f02897568b84a33f5fe5c5c41795e69b3b08d | 3,659,581 |
import urllib
import json
def activation(formula=None, instrument=None,
flux=None, cdratio=0, fastratio=0,
mass=None, exposure=24, getdata=False):
"""Calculate sample activation using the FRM II activation web services.
``formula``:
the chemical formula, see below for possible formats
The *flux* can be specified either by:
``instrument``:
the instrument name to select flux data
or:
``flux``:
The thermal flux (for cold instruments use the equivalent
thermal flux)
``cdratio``:
The ratio between full flux and flux with 1mm Cd in the beam,
0 to deactivate
``fastratio``:
Thermal/fast neutron ratio, 0 to deactivate
``mass``:
the sample mass in g
``exposure``:
exposure time in h, default 24h
``getdata``:
In addition to printing the result table,
return a dict with the full results for further
processing
**Formula input format**
Formula:
``CaCO3``
Formula with fragments:
``CaCO3+6H2O``
Formula with parentheses:
``HO ((CH2)2O)6 H``
Formula with isotope:
``CaCO[18]3+6H2O``
Counts can be integer or decimal:
``CaCO3+(3HO1.5)2``
Mass fractions use %wt, with the final portion adding to 100%:
``10%wt Fe // 15% Co // Ni``
Volume fractions use %vol, with the final portion adding to 100%:
``10%vol [email protected] // [email protected]``
For volume fractions you have to specify the density using
``@<density>``!
Mixtures can nest. The following is a 10% salt solution by weight \
mixed 20:80 by volume with D2O:
``20%vol (10%wt [email protected] // H2O@1) // D2O@1``
"""
if formula is None:
try:
# preparation for a future enhanced sample class
formula = session.experiment.sample.formula
except (ConfigurationError, AttributeError):
# ConfigurationError is raised if no experiment is in session
pass
if formula is None:
raise UsageError('Please give a formula')
if flux:
instrument = 'Manual'
if instrument is None:
try:
instrument = session.instrument.instrument or None
except ConfigurationError:
pass
if instrument is None:
        raise UsageError('Please specify an instrument or flux')
if mass is None:
try:
            mass = session.experiment.sample.mass
except (ConfigurationError, AttributeError):
pass
if mass is None:
raise UsageError('Please specify the sample mass')
qs = '?json=1&formula=%(formula)s&instrument=%(instrument)s&mass=%(mass)g' \
% locals()
if flux:
qs += '&fluence=%(flux)f&cdratio=%(cdratio)f&fastratio=%(fastratio)f' \
% locals()
qs = ACTIVATIONURL + qs
try:
with urllib.request.urlopen(qs) as response:
data = json.load(response)
except urllib.error.HTTPError as e:
session.log.warning('Error opening: %s', qs)
session.log.warning(e)
return None
if data['ecode'] == 'unknown instrument' and flux is None:
session.log.warning('Instrument %s unknown to calculator, '
'specify flux manually', instrument)
session.log.info('Known instruments')
printTable(['instrument'], [(d, ) for d in data['instruments']],
session.log.info)
if data['result']['activation']:
h = data['result']['activation']['headers']
th = [h['isotope'], h['daughter'], h['reaction'], h['Thalf_str']]
for ha in h['activities']:
th.append(ha)
rows = []
for r in data['result']['activation']['rows']:
rd = [r['isotope'], r['daughter'], r['reaction'], r['Thalf_str']]
for a in r['activities']:
rd.append('%.3g' % a if a > 1e-6 else '<1e-6')
rows.append(rd)
dr = ['', '', '', 'Dose (uSv/h)']
for d in data['result']['activation']['doses']:
dr.append('%.3g' % d)
rows.append(dr)
printTable(th, rows, session.log.info)
else:
session.log.info('No activation')
if getdata:
return data
return | 40588f5d5d76625b759f6642205b28aba8b9ceb8 | 3,659,582 |
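# Illustrative usage sketch (hypothetical instrument and sample values; needs network
# access to the FRM II activation web service):
data = activation(formula='CaCO3', instrument='PGAA', mass=2.0, getdata=True)
# Or with a manually specified thermal-equivalent flux instead of an instrument name:
data = activation(formula='10%wt Fe // 15% Co // Ni', flux=1e10, mass=0.5, getdata=True)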
def wrap_parfor_blocks(parfor, entry_label = None):
"""wrap parfor blocks for analysis/optimization like CFG"""
blocks = parfor.loop_body.copy() # shallow copy is enough
    if entry_label is None:
entry_label = min(blocks.keys())
assert entry_label > 0 # we are using 0 for init block here
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(entry_label, blocks[0].loc))
for block in blocks.values():
if len(block.body) == 0 or (not block.body[-1].is_terminator):
block.body.append(ir.Jump(entry_label, block.loc))
return blocks | 03528c18c9cd1f8d9671d12e0a4fa8668003305b | 3,659,583 |
from collections import Counter
from operator import itemgetter
def frequency_of_occurrence(words, specific_words=None):
"""
    Returns a list of (word, count) tuples sorted from most to least common (ties broken by word),
    pairing each word with its count/frequency.
    If a specific_words list is given, the SUM of the frequencies of those words is returned instead.
"""
freq = sorted(sorted(Counter(words).items(), key=itemgetter(0)), key=itemgetter(1), reverse=True)
    if not specific_words:
return freq
else:
frequencies = 0
for (inst, count) in freq:
if inst in specific_words:
frequencies += count
return float(frequencies) | a98670a89e843774bd1237c0d2e518d2cd8fb242 | 3,659,584 |
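# Usage example:
words = ['a', 'b', 'a', 'c', 'b', 'a']
print(frequency_of_occurrence(words))               # [('a', 3), ('b', 2), ('c', 1)]
print(frequency_of_occurrence(words, ['b', 'c']))   # 3.0 (2 + 1)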
from typing import Union
def cache_remove_all(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Removes all entries from cache, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_ALL,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
) | 81e7cdbae9b3a04e205e15275dee2c45caa96d36 | 3,659,585 |
import json
from typing import OrderedDict
import os
import re
def transform_bundle(bundle_uuid, bundle_version, bundle_path, bundle_manifest_path, extractor=None):
"""
This function is used with the ETL interface in dcplib.etl.DSSExtractor.extract.
    Given a bundle ID and a directory containing its metadata JSON files, it produces an intermediate representation
of the bundle and its files ready to be inserted into the database by BundleLoader.
"""
result = dict(uuid=bundle_uuid,
version=bundle_version,
manifest=json.load(open(bundle_manifest_path)),
aggregate_metadata={},
files=OrderedDict())
# Load and process all the metadata files; construct the "aggregate_metadata" doc:
# - Singleton metadata files get inserted under their name minus the extension (project.json => {"project": {...}})
# - Numbered metadata files are put in an array (assay_0.json, assay_1.json => {"assay": [{...0...}, {...1...}]})
bundle_fetched_files = sorted(os.listdir(bundle_path)) if os.path.exists(bundle_path) else []
for f in bundle_fetched_files:
if re.match(r"(.+)_(\d+).json", f):
metadata_key, index = re.match(r"(.+)_(\d+).json", f).groups()
elif re.match(r"(.+).json", f):
metadata_key, index = re.match(r"(.+).json", f).group(1), None
else:
metadata_key, index = f, None
with open(os.path.join(bundle_path, f)) as fh:
file_doc = json.load(fh)
if index is None:
result["aggregate_metadata"][metadata_key] = file_doc
else:
result["aggregate_metadata"].setdefault(metadata_key, [])
result["aggregate_metadata"][metadata_key].append(file_doc)
for fm in result["manifest"]["files"]:
if f == fm["name"] and "schema_type" in file_doc:
result["files"][fm["name"]] = dict(fm,
body=file_doc,
schema_type=file_doc['schema_type'])
# For all other (non-metadata) files from the bundle manifest, insert them with a default body
# indicating an empty schema type.
for fm in result["manifest"]["files"]:
if fm["name"] not in result["files"]:
result["files"][fm["name"]] = dict(fm,
body=None,
schema_type=None)
# Flatten the file list while preserving order.
result["files"] = list(result["files"].values())
return result | dc42d07ff69a0a6b6f1778064153cd521151da4e | 3,659,586 |
from typing import List
def choices_function() -> List[str]:
"""Choices functions are useful when the choice list is dynamically generated (e.g. from data in a database)"""
return ['a', 'dynamic', 'list', 'goes', 'here'] | 30b4b05435bacc0a42c91a3f0be09a90098a012f | 3,659,587 |
def GetInfraPythonPath(hermetic=True, master_dir=None):
"""Returns (PythonPath): The full working Chrome Infra utility path.
This path is consistent for master, slave, and tool usage. It includes (in
this order):
- Any environment PYTHONPATH overrides.
- If 'master_dir' is supplied, the master's python path component.
- The Chrome Infra build path.
- The system python path.
Args:
hermetic (bool): True, prune any non-system path from the system path.
master_dir (str): If not None, include a master path component.
"""
path = PythonPath()
if master_dir:
path += GetMasterPythonPath(master_dir)
path += GetBuildPythonPath()
path += GetSysPythonPath(hermetic=hermetic)
return path | a43486c68559e42606a3a55444c998640529ef2b | 3,659,588 |
import uuid
def nodeid():
"""nodeid() -> UUID
Generate a new node id
>>> nodeid()
UUID('...')
:returns: node id
:rtype: :class:`uuid.UUID`
"""
return uuid.uuid4() | 88a3ddc335ce2ca07bfc0e2caf8487dc2342e80f | 3,659,589 |
def drop_redundant_cols(movies_df):
"""
Drop the following redundant columns:
1. `release_data_wiki` - after dropping the outlier
2. `revenue` - after using it to fill `box_office` missing values
3. `budget_kaggle` - after using it to fill `budget_wiki` missing values
4. `duration` - after using it to fill `runtime` missing values
Parameters
----------
movies_df : Pandas dataframe
Joined movie data
Returns
-------
Pandas dataframe
Movie data with redundant columns dropped
"""
# Drop record with `release_date` outlier and `release_date_wiki` column
outlier_index = movies_df.loc[(movies_df['release_date_wiki'] > '2000') &
(movies_df['release_date_kaggle'] < '1960')].index
movies_df.drop(outlier_index, inplace=True)
movies_df.drop('release_date_wiki', axis=1, inplace=True)
# Pairs of redundant columns
redundant_pairs = [
['box_office', 'revenue'],
['budget_wiki', 'budget_kaggle'],
['runtime', 'duration']
]
# Fill the first column and drop the second column for each pair
for a, b in redundant_pairs:
movies_df = filla_dropb(a, b, movies_df)
return movies_df | f4fb2c98eafc4ec9074cbc659510af30c7155b9c | 3,659,590 |
import pickle
import gzip
import collections
import re
from fnmatch import fnmatch
def get_msids_for_add_msids(opt, logger):
"""
Parse MSIDs spec file (opt.add_msids) and return corresponding list of MSIDs.
This implements support for a MSID spec file like::
# MSIDs that match the name or pattern are included, where * matches
# anything (0 or more characters) while ? matches exactly one character:
#
aopcadm?
aacccd*
# MSIDs with the same subsystem and sampling rate as given MSIDs are included.
# Example: */1wrat gives all acis4eng engineering telemetry.
*/1wrat
# MSIDs with the same subsystem regardless of sampling rate.
# Example: **/3tscpos gives all engineering SIM telemetry
**/3tscpos
:param opt: options
:param logger: logger
:return: msids_out, msids_content (mapping of MSID to content type)
"""
logger.info(f'Reading available cheta archive MSIDs from {opt.sync_root}')
with get_readable(opt.sync_root, opt.is_url, sync_files['msid_contents']) as (tmpfile, uri):
if tmpfile is None:
# If index_file is not found then get_readable returns None
            logger.info(f'No cheta MSIDs list file found at {uri}')
return None
logger.info(f'Reading cheta MSIDs list file {uri}')
msids_content = pickle.load(gzip.open(tmpfile, 'rb'))
content_msids = collections.defaultdict(list)
for msid, content in msids_content.items():
content_msids[content].append(msid)
logger.info(f'Reading MSID specs from {opt.add_msids}')
with open(opt.add_msids) as fh:
lines = [line.strip() for line in fh.readlines()]
msid_specs = [line.upper() for line in lines if (line and not line.startswith('#'))]
logger.info('Assembling list of MSIDs that match MSID specs')
msids_out = []
for msid_spec in msid_specs:
if msid_spec.startswith('**/'):
msid_spec = msid_spec[3:]
content = msids_content[msid_spec]
subsys = re.match(r'([^\d]+)', content).group(1)
for content, msids in content_msids.items():
if content.startswith(subsys):
logger.info(f' Found {len(msids)} MSIDs for **/{msid_spec} with '
f'content = {content}')
msids_out.extend(msids)
elif msid_spec.startswith('*/'):
msid_spec = msid_spec[2:]
content = msids_content[msid_spec]
msids = content_msids[content]
logger.info(f' Found {len(msids)} MSIDs for */{msid_spec} with '
f'content = {content}')
msids_out.extend(msids)
else:
msids = [msid for msid in msids_content if fnmatch(msid, msid_spec)]
if not msids:
                raise ValueError(f'no MSID matching {msid_spec} (remember derived params like PITCH '
                                 'must be written as "dp_<MSID>")')
logger.info(f' Found {len(msids)} MSIDs for {msid_spec}')
msids_out.extend(msids)
logger.info(f' Found {len(msids_out)} matching MSIDs total')
return msids_out, msids_content | b7f2a5b9f1452c8f43684223313716253e27848b | 3,659,591 |
def gaussian_similarity(stimulus_representation, i, j, w, c, r):
"""
Function that calculates and returns the gaussian similarity of stimuli i and j (equation 4b in [Noso86]_)
Parameters
----------
stimulus_representation : np.array
The stimuli are given to this function in the form of a n x N matrix, where n is the number of stimuli and N is
the number of dimensions of each stimuli in the psychological space
i : int
Stimulus i
j : int
Stimulus j
w : list
This is the list of weights corresponding to each dimension of the stimulus in the psychological space
c : int
This is the scale parameter used in the distance calculation
r : int
        This is the Minkowski distance metric. A value of 1 corresponds to the city-block metric (generally
        used when the stimuli have separable dimensions); a value of 2 corresponds to the Euclidean distance
        metric (generally used when the stimuli have integral dimensions)
Returns
-------
np.float64
The Gaussian similarity between the two stimulus
"""
def distance():
"""
Calculates the distance between two stimulus (equation 6 in [Noso86]_)
Returns
-------
np.float64
Distance scaled by the scale parameter 'c'
"""
sum = 0.0
N = np.shape(stimulus_representation)[1]
for idx in range(N):
            sum += (w[idx] * abs(stimulus_representation[i, idx] - stimulus_representation[j, idx]) ** r)
sum = sum ** (1 / r)
return c * sum
return np.exp(-(distance()) ** 2) | e1436a26d4f028f237e03d40590cb6b7405b3f16 | 3,659,592 |
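# Illustrative worked example: two stimuli in a 2-dimensional psychological space,
# equal attention weights, city-block metric (r=1) and scale c=1.
import numpy as np
stim = np.array([[0.0, 0.0],
                 [1.0, 2.0]])
w = [0.5, 0.5]
# distance = 1 * (0.5*|0-1| + 0.5*|0-2|) = 1.5, so similarity = exp(-1.5**2) ~= 0.105
print(gaussian_similarity(stim, 0, 1, w, c=1, r=1))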
def windShearVector(u, v, top, bottom, unit=None):
""" calculate the u and v layer difference and return as vector
"""
udiff = layerDiff(u, top, bottom, unit)
vdiff = layerDiff(v, top, bottom, unit)
return makeVector(udiff, vdiff) | fa9fe1869621c04f00004a8a5c01e78d4faa3221 | 3,659,593 |
def withdraw_entry(contest):
"""Withdraws a submitted entry from the contest.
After this step the submitted entry will be seen as a draft.
"""
return _update_sketch(contest, code=None, action="withdraw") | fdedfeb61e0fe3b47918b66ca1f9dfd56450e39c | 3,659,594 |
def _conditional_field(if_, condition, colon, comment, eol, indent, body,
dedent):
"""Formats an `if` construct."""
del indent, dedent # Unused
# The body of an 'if' should be columnized with the surrounding blocks, so
# much like an inline 'bits', its body is treated as an inline list of blocks.
header_row = _Row('if',
['{} {}{} {}'.format(if_, condition, colon, comment)])
indented_body = _indent_blocks(body)
assert indented_body, 'Expected body of if condition.'
return [_Block([header_row] + eol + indented_body[0].prefix,
indented_body[0].header,
indented_body[0].body)] + indented_body[1:] | 09c357659d5f78946d74cddc83f3e4c5c9cad0ed | 3,659,595 |
def check_sum_cases(nation='England'):
"""check total data"""
ck=LocalLatest()
fail=False
data=ck.data.get('data')
latest={}
data=clean_cases(data) #repair glitches
#check latest data matches stored data for nation
for i in data:
_code=i['areaCode']
latest[_code]=i
try:
_nation=ons_week.nation[_code]
except Exception as e:
log.error(e)
log.error(i['areaName'])
continue
if _nation==nation:
if _code in ons_week.stored_names:
place=ons_week.stored_names[_code]
_total=DailyCases.objects.filter(areaname=place).aggregate(Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
_latest=i['cumCasesByPublishDate']
if _total !=_latest:
                    print(f'Mismatch: {place} Latest total {_latest} != stored {_total}')
fail=True
else:
#print(f'{place} up to date')
pass
else:
place=i['areaName']
print(f'{place} not counted / not in TR tally')
sumtotal=0
for _code in ons_week.stored_names:
if ons_week.nation[_code]==nation:
i=latest.get(_code)
if i:
_latest=i['cumCasesByPublishDate']
_total=DailyCases.objects.filter(areacode=_code).aggregate(Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
if _latest!=_total:
                    print(f'Mismatch: {_code} Latest total {_latest} != stored {_total}')
else:
if _latest:
sumtotal +=_latest
else:
print(f'Missing place {_code} in PHE published cases')
print(f'Sum total of stored names for {nation} is {sumtotal}')
return fail | 5f30d4a856c21c1397e2f1cdd9a0ee03d026b5a2 | 3,659,596 |
from inspect import getmodulename, stack
def get_module_name() -> str:
"""Gets the name of the module that called a function
Is meant to be used within a function.
:returns: The name of the module that called your function
"""
return getmodulename(stack()[2][1]) | 12541aa8445ebd796657d76d3001523882202ea0 | 3,659,597 |
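# Usage example: get_module_name() reports the module of whoever called the function
# it is used in, so when this file runs as a script the result is this file's stem.
def _whoami():
    return get_module_name()

if __name__ == '__main__':
    print(_whoami())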
def hand_points(work_hand):
"""returns the point value of a given hand"""
debug_level = 1
work_points = 0
for card in work_hand:
work_points += card_point_value(card)
return work_points | 996c0f9f1836495d99836eab8ef3a26e7b592be6 | 3,659,598 |
def elements_counter(arr, count=0):
"""递归计算列表包含的元素数
Arguments:
arr {[list]} -- [列表]
Keyword Arguments:
count {int} -- [列表包含的元素数] (default: {0})
Returns:
[int] -- [列表包含的元素数]
"""
if len(arr):
arr.pop(0)
count += 1
return elements_counter(arr, count)
return count | 80809781fd2d6a7a2fa92a4b7d5713771a07f8eb | 3,659,599 |
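# Usage example: the recursion pops items while counting, so the input list is consumed;
# pass a copy if the original is still needed.
items = [1, 2, 3, 4]
print(elements_counter(list(items)))   # 4
print(items)                           # [1, 2, 3, 4] (preserved because a copy was passed)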