content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
def q_fn(x):
"""
The Q-function assesses all possible actions that can be taken, given a state.
Feed-forward neural network with two hidden layers and a linear output layer. All layers are fully connected; biases are initialized to 0.
The constants above define the layer sizes.
:param x: Batch input tensor to the network.
:return: Q-value estimate for each of the NUM_ACTIONS actions (raw linear outputs; no softmax is applied).
"""
with tf.variable_scope('dense1') as scope:
weights = tf.get_variable('weights', [INPUT_SIZE, DENSE1_UNITS], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=1.0 / DENSE1_UNITS))
biases = tf.get_variable('biases', shape=[DENSE1_UNITS], dtype=tf.float32,
initializer=tf.constant_initializer(0.0, dtype=tf.float32))
pre_activation = tf.add(tf.matmul(x, weights), biases, name='pre_activation')
dense1 = tf.sigmoid(pre_activation, name=scope.name)
with tf.variable_scope('dense2') as scope:
weights = tf.get_variable('weights', [DENSE1_UNITS, DENSE2_UNITS], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=1.0 / DENSE2_UNITS))
biases = tf.get_variable('biases', shape=[DENSE2_UNITS], dtype=tf.float32,
initializer=tf.constant_initializer(0.0, dtype=tf.float32))
pre_activation = tf.add(tf.matmul(dense1, weights), biases, name='pre_activation')
dense2 = tf.sigmoid(pre_activation, name=scope.name)
with tf.variable_scope('actions') as scope:
weights = tf.get_variable('weights', [DENSE2_UNITS, NUM_ACTIONS], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=1.0 / NUM_ACTIONS))
biases = tf.get_variable('biases', shape=[NUM_ACTIONS], dtype=tf.float32,
initializer=tf.constant_initializer(0.0, dtype=tf.float32))
action_q = tf.add(tf.matmul(dense2, weights), biases, name='action_q_value')
return action_q | 4c4fece48773d020c321fd433aa75caa7bc258ee | 20,081 |
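A minimal usage sketch for the Q-network above, assuming TensorFlow 1.x; the size constants are not given in the snippet, so the values below are purely illustrative.
import tensorflow as tf

INPUT_SIZE, DENSE1_UNITS, DENSE2_UNITS, NUM_ACTIONS = 4, 64, 64, 3  # hypothetical sizes

state = tf.placeholder(tf.float32, shape=[None, INPUT_SIZE], name='state')
q_values = q_fn(state)                       # (batch, NUM_ACTIONS) raw Q-values
greedy_action = tf.argmax(q_values, axis=1)  # greedy policy w.r.t. the Q estimates

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(greedy_action, feed_dict={state: [[0.1, -0.2, 0.3, 0.0]]}))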
def getPlayer(env, name, decoder):
"""Get user's player data"""
players = getPlayers(env, decoder)
if name in players.keys():
return players[name]
else:
return False | fd98e481abab6e7f53bf66d0285f506d06f5e6ea | 20,082 |
import re
def get_config_errors(conf, filename="<no name>"):
"""
Validate a configuration object and return the list of errors found.
"""
rv = []
# Give a clearer error message than what jsonschema would give
# Something like: None is not of type 'object'
if not isinstance(conf, dict):
msg = "config must be an object containing 'db_objects'"
rv.append(located_message(None, filename, msg))
return rv
errors = list(validator.iter_errors(conf))
for error in errors:
loc = location_from_error(conf, error)
rv.append(located_message(loc, filename, error.message))
for obj in conf.get("db_objects", ()):
if isinstance(obj, dict):
rv.extend(_get_rule_errors(obj, filename))
# sort by line number
def lineno(s):
m = re.search(r":(\d+)", s)
return int(m.group(1)) if m is not None else 0
rv.sort(key=lineno)
return rv | 4020d5dd33f840dc6c0e3c24be77acf80a606d57 | 20,083 |
def process_vcf( info ):
"""
pass izip object of line object and other needed vars
info[0] = list of vcf lines from VCF object iterator.
info[1] = clf object
info[2] = dataset dictionary
info[3] = filter arg supplied by user
info[4] = min classification frequency supplied by user (defaults to None)
"""
#sys.stderr.write("... running process VCF with job id %d \n" %(os.getpid() ) )
#parse the args to function
line_list = info[0] #list of lines from VCF obj
clf = info[1] #randomForest object
dataset = info[2] #dataset with class names
filter = info[3] #filter arg supplied by user
minclassfreq = info[4]
#iterate over lines in the chunked data
return_list = []
for line in line_list:
line = line.strip().split("\t")
vdat = parse_vcf_data( line[7] ) #parse all of vcf appended data
filter_bool = run_filters( vdat, filtering=filter ) #boolean of whether line info passes filters
if filter_bool:
_x = vdat[ 'AT' ].split(",") #create list from data in 'AT' field
_x = _x[1:]
#results = classify_data( _x, clf, dataset['target_names'] )
results = classify_data( _x, clf, dataset['target_names'], minclassfreq )
line[7] = line[7] + ";" + results #append data to correct vcf column
#print "\t".join( line ) #print results to stdout
print_line = "\t".join( line )
return_list.append( print_line )
else:
return_list.append( None )
#return the full list of updated line data
return( return_list ) | 389146cd88804935ee2aae85a9e5d84684f81b7e | 20,084 |
def compute_task1_f1_score(truth, solutions):
""" compute f1 score for task 1
:param truth: list of ground truth values for all problem-ids
:param solutions: list of solutions for all problem-ids
:return: f1 score
"""
task1_truth, task1_solution = extract_task_results(truth, solutions, 'multi-author')
return f1_score(task1_truth, task1_solution, average='micro') | 2e44603c547062d85023fb405d3ab511d3ca40d3 | 20,086 |
def multiply(t1,t2):
"""
Multiplies (expands) two binary expressions t1 and t2 based on the distributive rule
Args:
t1 (str): first binary expression
t2 (str): second binary expression
Returns:
A string representing the expansion of the boolean algebraic expressions
"""
t1 = t1.split('+')
t2 = t2.split('+')
prod = ''
for m in t1:
temp = ""
for n in t2:
if t1.index(m) == len(t1)-1 and t2.index(n) == len(t2)-1:
if m!=n:
temp=(temp+m+n)
else:
temp += m
else:
if m!=n:
temp=temp + m+n+'+'
else:
temp+=m+'+'
prod+=temp
return prod | 0078ee94420722600be31edc74a86b1932c4d2f2 | 20,087 |
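A quick worked example of the distributive expansion performed by the function above:
# (a + b)(c + d) -> ac + ad + bc + bd
assert multiply('a+b', 'c+d') == 'ac+ad+bc+bd'
# Repeated terms are collapsed rather than squared: (a)(a + b) -> a + ab
assert multiply('a', 'a+b') == 'a+ab'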
def source_remove_all(obj_type, obj_id, name, analyst=None):
"""
Remove a source from a top-level object.
:param obj_type: The CRITs type of the top-level object.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param name: The name of the source.
:type name: str
:param analyst: The user performing the removal.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
obj = class_from_id(obj_type, obj_id)
if not obj:
return {'success': False,
'message': 'Unable to find object in database.'}
try:
result = obj.remove_source(source=name,
remove_all=True)
obj.save(username=analyst)
return result
except ValidationError as e:
return {'success': False, 'message': str(e)}
def driver(dbname):
"""
Determine driver module
:Parameters:
`dbname` : ``str``
DB name (section token in db.conf)
:Return: Driver module
:Rtype: ``module``
:Exceptions:
- `DBConfigurationError` : DB not configured
- `KeyError` : DB name not found
- `ImportError` : Driver not found
"""
return _connection.driver(dbname) | 01a2b0e10975879f944bbaf51b44d8eef6b97996 | 20,089 |
from typing import Union
def d1tile_x_d2(d1: Union[float, np.ndarray],
d2: np.ndarray) -> np.ndarray:
"""
Create array of repeated values with dimensions that match those of energy array
Useful to multiply frequency-dependent values to frequency-time matrices
:param d1: 1D input vector, nominally frequency/scale multipliers
:param d2: 2D array, first dimension should be that same as d1
:return: array with matching values
"""
shape_out = d2.shape
if len(shape_out) == 1:
d1_matrix = np.tile(d1, (shape_out[0]))
elif len(shape_out) == 2:
d1_matrix = np.tile(d1, (shape_out[1], 1)).T
else:
raise TypeError('Cannot handle a d2 array of shape {}.'.format(str(d2.shape)))
if d1_matrix.shape == d2.shape:
d1_x_d2 = d1_matrix * d2
else:
raise TypeError('Tiled d1 shape {} does not match d2 shape {}.'.format(str(d1_matrix.shape), str(d2.shape)))
return d1_x_d2 | 68721f7f9ab1b60f77e8199ad917dd47b19aaa95 | 20,090 |
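Usage sketch for the helper above; it assumes only NumPy, and the array values are illustrative.
import numpy as np

freq_weights = np.array([1.0, 2.0, 3.0])   # one multiplier per frequency bin
tfr = np.ones((3, 5))                      # (frequency, time) matrix
scaled = d1tile_x_d2(freq_weights, tfr)    # row i of tfr is multiplied by freq_weights[i]
assert scaled.shape == (3, 5) and np.allclose(scaled[2], 3.0)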
def get_all_gradients_for_Q4( theta, X, Y ):
"""
Do the same thing as Q(iv) but it is actually only for storing and
observing the sample gradient and whole gradient for the Q(iv) step
Output the sample grdient and whole grdient data
"""
# Get difference as Euclidean distance
def get_difference( old_theta, new_theta ):
difference_mat = old_theta - new_theta
difference_square = np.multiply( difference_mat, difference_mat )
difference = math.sqrt( np.sum( difference_square ) )
return difference
# Contains all gradient_i
grad_i_val_observe = []
grad_val_observe = []
# Set random seed
random.seed( 1 )
# Get updated theta
def get_new_theta( old_theta, eta ):
# Code for using single sample gradient
random_i = random.randint( 0, X.shape[0] - 1 )
grad_i_val = get_grad_f_i( old_theta, X, Y, random_i )
# Get the whole gradient to observe
grad_val = get_grad_f( old_theta, X, Y )
# Scale by the size N (multiply by 10,000)
grad_i_val = grad_i_val * X.shape[0]
# Store grad_val to observe Q(v)
grad_i_val_list = grad_i_val.tolist()
grad_i_val_list = grad_i_val_list[0]
grad_val_list = grad_val.tolist()
grad_val_list = grad_val_list[0]
grad_i_val_observe.append( grad_i_val_list )
grad_val_observe.append( grad_val_list )
new_theta = old_theta - ( eta * grad_i_val )
return new_theta
############################################################
precision = 0.01 #
eta = 0.000000008 #
############################################################
old_theta = theta
new_theta = get_new_theta( old_theta, eta )
difference = get_difference( old_theta, new_theta )
while difference > precision:
old_theta = new_theta
new_theta = get_new_theta( old_theta, eta )
# Get new difference
difference = get_difference( old_theta, new_theta )
value = op_func( new_theta, X, Y )
# Showing information...
print()
print("difference: " + str( difference ))
print("theta: ")
print(new_theta)
print("function value: " + str( value ))
return grad_i_val_observe, grad_val_observe | d508b826f552d844cf95f9d5515c5eb1512dfbcb | 20,091 |
def findSubsetIndices(min_lat,max_lat,min_lon,max_lon,lats,lons):
"""Array to store the results returned from the function"""
res=np.zeros((4),dtype=np.float64)
minLon=min_lon; maxLon=max_lon
distances1 = []; distances2 = []
indices=[]; index=1
for point in lats:
s1 = max_lat-point # (vector subtract)
s2 = min_lat-point # (vector subtract)
distances1.append((np.dot(s1, s1), point, index))
distances2.append((np.dot(s2, s2), point, index-1))
index=index+1
distances1.sort()
distances2.sort()
indices.append(distances1[0])
indices.append(distances2[0])
distances1 = []; distances2 = []; index=1
for point in lons:
s1 = maxLon-point # (vector subtract)
s2 = minLon-point # (vector subtract)
distances1.append((np.dot(s1, s1), point, index))
distances2.append((np.dot(s2, s2), point, index-1))
index=index+1
distances1.sort()
distances2.sort()
indices.append(distances1[0])
indices.append(distances2[0])
""" Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices"""
minJ=indices[1][2]
maxJ=indices[0][2]
minI=indices[3][2]
maxI=indices[2][2]
res[0]=minI; res[1]=maxI; res[2]=minJ; res[3]=maxJ;
return res | 7d41b33fd492fa8b5cd62c368210bcbea2c7bc89 | 20,092 |
def call(stoptime, seconds, method=None):
"""
Returns a dict with route, direction, stop, call time and source.
Call time is in UTC.
"""
result = dict(stoptime._asdict(), call_time=toutc(seconds), source=method or "I")
result["deviation"] = result["call_time"] - stoptime.datetime
return result | bfa78ec89b60bf1140b8b24bc91fd1a2dd9c2a63 | 20,093 |
import re
def sanitize_value(val):
"""Remove crap from val string and then convert it into float"""
val = re.sub(u"(\xa0|\s)", '', val)
val = val.replace(',', '.')
# positive or negative multiplier
mult = 1
if '-' in val and len(val) > 1:
mult = -1
val = val.replace('-', '')
elif '-' in val:
val = '0'
if val is not None:
if '%' in val:
val = float(val.replace('%', ''))
return float(val) * mult | 0fc67bf519674575451f4fc029bee658ea2bd2da | 20,094 |
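A few illustrative inputs and the values the function above returns (European-style numbers with non-breaking spaces and comma decimals):
assert sanitize_value(u'1\xa0234,56') == 1234.56   # thousands separator stripped, comma -> dot
assert sanitize_value('-5,2') == -5.2              # leading minus handled via the multiplier
assert sanitize_value('12%') == 12.0               # percent sign dropped
assert sanitize_value('-') == 0.0                  # a bare dash is treated as zero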
def getObjectInfo(fluiddb, about):
"""
Gets object info for an object with the given about tag.
"""
return fluiddb.about[about].get() | 8614edaf44944fcc11882ac2fcaa31ba31d48d30 | 20,095 |
import warnings
def __getattr__(name):
"""Get attribute."""
deprecated = __deprecated__.get(name)
if deprecated:
warnings.warn(
"'{}' is deprecated. Use '{}' instead.".format(name, deprecated[0]),
category=DeprecationWarning,
stacklevel=(3 if PY37 else 4)
)
return deprecated[1]
raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name)) | c904f1221492e8f08786918dd496d3d6861fd35e | 20,096 |
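For context, a sketch of the module-level names this PEP 562 module-__getattr__ hook relies on; the contents of `__deprecated__` below are an assumption, not taken from the original module.
import sys
import warnings

PY37 = sys.version_info >= (3, 7)

# maps a deprecated attribute name to (replacement name, replacement object)
__deprecated__ = {
    "old_flag": ("new_flag", object()),
}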
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
return common.read_model('finger.xml'), common.ASSETS | cacb53de08eef5695a5464fbb72a1706ff489276 | 20,097 |
async def process_logout():
"""
Purge the login information from the users session/cookie data
:return: Redirect to main body
"""
# Simply destroy the cookies in this session and get rid of the creds, redirect to landing
response = RedirectResponse("/") # Process the destruction from main app/test result
response.delete_cookie("user")
response.delete_cookie("flow")
return response | 98d9c8c40e0a1fe224538b353a2a803fdd7fce76 | 20,098 |
from typing import Optional
def _lex_label(label: str) -> _LexedLabel:
"""Splits the label into packages and target."""
match = _LABEL_LEXER.match(label)
if match is None:
raise ValueError(f'{label} is not an absolute Bazel label')
groups = match.groupdict()
packages: Optional[str] = groups['packages']
target: Optional[str] = groups['target']
if packages is None and target is None:
raise ValueError(f'{label} cannot be empty')
init = packages.split('/') if packages else []
last = target[1:] if target else init[-1]
return init, last | f067d5e81b02a4242d8459b41d49c302459f416b | 20,099 |
def generate_extra(candidate: tuple, expansion_set, murder_list=None, attempted=None) -> list:
"""
Special routine for graph based algorithm
:param candidate:
:param expansion_set:
:param murder_list:
:param attempted:
:return:
"""
check = manufacture_lambda(attempted, murder_list)
accepted_sets = list()
for regular_constraint in expansion_set:
val = list(candidate)
val.append(regular_constraint)
future_child = tuple(sorted(val))
if check(future_child):
accepted_sets.append(future_child)
return accepted_sets | e38d605df2a562b269189c8e7714ec97e89d8f36 | 20,100 |
from typing import Tuple
from typing import Dict
def extract_oe_stereochemistry(
molecule: Molecule, oe_mol: "OEMol"
) -> Tuple[Dict[int, AtomStereochemistry], Dict[int, BondStereochemistry]]:
"""Extracts the CIP stereochemistry of each atom and bond in a OE molecule."""
atom_stereo = {
oe_atom.GetIdx(): atom_cip_stereochemistry(oe_mol, oe_atom)
for oe_atom in oe_mol.GetAtoms()
}
bond_stereo_tuples = {
tuple(
sorted([oe_bond.GetBgnIdx(), oe_bond.GetEndIdx()])
): bond_cip_stereochemistry(oe_mol, oe_bond)
for oe_bond in oe_mol.GetBonds()
}
bond_stereo = {
i: bond_stereo_tuples[tuple(sorted([bond.atom1_index, bond.atom2_index]))]
for i, bond in enumerate(molecule.bonds)
}
return atom_stereo, bond_stereo | 0d051e847c94a81585a1478fc10bfac335d700a6 | 20,101 |
from dipy.denoise.nlmeans import nlmeans
from scipy.ndimage.morphology import binary_erosion
from scipy import ndimage
import numpy as np
import nibabel as nb
import os.path as op
def nlmeans_proxy(in_file, settings,
snr=None,
smask=None,
nmask=None,
out_file=None):
"""
Uses non-local means to denoise 4D datasets
"""
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, fext2 = op.splitext(fname)
fext = fext2 + fext
out_file = op.abspath('./%s_denoise%s' % (fname, fext))
img = nb.load(in_file)
hdr = img.header
data = img.get_data()
aff = img.affine
if data.ndim < 4:
data = data[..., np.newaxis]
data = np.nan_to_num(data)
if data.max() < 1.0e-4:
raise RuntimeError('There is no signal in the image')
df = 1.0
if data.max() < 1000.0:
df = 1000. / data.max()
data *= df
b0 = data[..., 0]
if smask is None:
smask = np.zeros_like(b0)
smask[b0 > np.percentile(b0, 85.)] = 1
smask = binary_erosion(
smask.astype(np.uint8), iterations=2).astype(np.uint8)
if nmask is None:
nmask = np.ones_like(b0, dtype=np.uint8)
bmask = settings['mask']
if bmask is None:
bmask = np.zeros_like(b0)
bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1
label_im, nb_labels = ndimage.label(bmask)
sizes = ndimage.sum(bmask, label_im, range(nb_labels + 1))
maxidx = np.argmax(sizes)
bmask = np.zeros_like(b0, dtype=np.uint8)
bmask[label_im == maxidx] = 1
nmask[bmask > 0] = 0
else:
nmask = np.squeeze(nmask)
nmask[nmask > 0.0] = 1
nmask[nmask < 1] = 0
nmask = nmask.astype(bool)
nmask = binary_erosion(nmask, iterations=1).astype(np.uint8)
den = np.zeros_like(data)
est_snr = True
if snr is not None:
snr = [snr] * data.shape[-1]
est_snr = False
else:
snr = []
for i in range(data.shape[-1]):
d = data[..., i]
if est_snr:
s = np.mean(d[smask > 0])
n = np.std(d[nmask > 0])
snr.append(s / n)
den[..., i] = nlmeans(d, snr[i], **settings)
den = np.squeeze(den)
den /= df
nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
hdr).to_filename(out_file)
return out_file, snr | 69629ef536830ccecdee09e5acfadf02d892cc9d | 20,102 |
def jointImgTo3D(sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((3,), np.float32)
# convert to metric using f
ret[0] = (sample[0]-centerX)*sample[2]/focalLengthX
ret[1] = (sample[1]-centerY)*sample[2]/focalLengthY
ret[2] = sample[2]
return ret | 43726ed712e268c9fd2434fa2734ff8aa0ee2d0a | 20,104 |
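The function above depends on module-level pinhole intrinsics (centerX, centerY, focalLengthX, focalLengthY). A sketch with hypothetical values shows the back-projection: a pixel at the principal point maps onto the optical axis.
import numpy as np

centerX, centerY = 160.0, 120.0            # hypothetical principal point (pixels)
focalLengthX = focalLengthY = 241.42       # hypothetical focal lengths (pixels)

print(jointImgTo3D(np.array([160.0, 120.0, 500.0])))  # -> [  0.   0. 500.]
print(jointImgTo3D(np.array([260.0, 120.0, 500.0])))  # 100 px offset in x at 500 mm depth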
def build_param_obj(key, val, delim=''):
"""Creates a Parameter object from key and value, surrounding key with delim
Parameters
----------
key : str
* key to use for parameter
value : str
* value to use for parameter
delim : str
* str to surround key with when adding to parameter object
Returns
-------
param_obj : :class:`taniumpy.object_types.parameter.Parameter`
* Parameter object built from key and val
"""
# create a parameter object
param_obj = taniumpy.Parameter()
param_obj.key = '{0}{1}{0}'.format(delim, key)
param_obj.value = val
return param_obj | 0fba11c4564ef57eab45ffd02bed887c42a14121 | 20,106 |
def copy_fixtures_to_matrixstore(cls):
"""
Decorator for TestCase classes which copies data from Postgres into an
in-memory MatrixStore instance. This allows us to re-use database fixtures,
and the tests designed to work with those fixtures, to test
MatrixStore-powered code.
"""
# These methods have been decorated with `@classmethod` so we need to use
# `__func__` to get a reference to the original, undecorated method
decorated_setUpClass = cls.setUpClass.__func__
decorated_tearDownClass = cls.tearDownClass.__func__
def setUpClass(inner_cls):
decorated_setUpClass(inner_cls)
matrixstore = matrixstore_from_postgres()
stop_patching = patch_global_matrixstore(matrixstore)
# Have to wrap this in a staticmethod decorator otherwise Python thinks
# we're trying to create a new class method
inner_cls._stop_patching = staticmethod(stop_patching)
new_settings = override_settings(
CACHES={
"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}
}
)
new_settings.enable()
inner_cls._new_settings = new_settings
def tearDownClass(inner_cls):
inner_cls._stop_patching()
inner_cls._new_settings.disable()
decorated_tearDownClass(inner_cls)
cls.setUpClass = classmethod(setUpClass)
cls.tearDownClass = classmethod(tearDownClass)
return cls | b64ef9b23afc76b8f1b2cf1ae6b56635cd6e4f56 | 20,107 |
def intersect_description(first, second):
"""
Intersect two description objects.
:param first: First object to intersect with.
:param second: Other object to intersect with.
:return: New object.
"""
# Check that none of the object is None before processing
if first is None:
return second
if second is None:
return first
if first.description_type == second.description_type:
# Same MIME types, can merge content
value = let_user_choose(first.value, second.value)
description_type = first.description_type
else:
# MIME types are different, set MIME type to text
description_type = 'text/enriched'
value = """
Original MIME-type for first description: '{0}'.
{1}
----
Original MIME-type for second description: '{2}'.
{3}
""".format(first.description_type, first.value,
second.description_type, second.value)
return Description(value, description_type) | 93d35314f8ab6ef0978de942ecfad3719c8f4971 | 20,108 |
def smooth_correlation_matrix(cor, sigma, exclude_diagonal=True):
"""Apply a simple gaussian filter on a correlation matrix.
Parameters
----------
cor : numpy array
Correlation matrix.
sigma : int, optional
Scale of the gaussian filter.
exclude_diagonal : boolean, optional
Whether to exclude the diagonal from the smoothing. That is what should
be done generally because the diagonal is 1 by definition.
Returns
-------
cor_new : numpy array
Smoothed correlation matrix.
"""
n_dim = len(np.diag(cor))
cor_new = np.copy(cor)
if exclude_diagonal:
cor_new[0, 0] = 0.5 * (cor[0, 1] + cor[1, 0])
cor_new[n_dim - 1, n_dim - 1] = 0.5 * (cor[n_dim - 1, n_dim - 2] +
cor[n_dim - 2, n_dim - 1])
for i in range(1, n_dim - 1):
cor_new[i, i] = 0.25 * (cor[i, i - 1] + cor[i, i + 1] +
cor[i - 1, i] + cor[i + 1, i])
cor_new = gaussian_filter(cor_new, sigma, mode='nearest')
if exclude_diagonal:
for i in range(n_dim):
cor_new[i, i] = cor[i, i]
return cor_new | 753337cc12578b5c2333392f01e028204ac2f0e0 | 20,109 |
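Usage sketch; it assumes NumPy and that `gaussian_filter` comes from scipy.ndimage, which is what the call signature suggests.
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(0)
noise = 0.1 * rng.standard_normal((6, 6))
cor = np.clip((noise + noise.T) / 2 + np.eye(6), -1.0, 1.0)

smoothed = smooth_correlation_matrix(cor, sigma=1)
assert np.allclose(np.diag(smoothed), np.diag(cor))  # the diagonal is put back untouched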
def quantize_iir_filter(filter_dict, n_bits):
"""
Quantize the iir filter tuple for sos_filt funcitons
Parameters:
- filter_dict: dict, contains the quantized filter dictionary with the following keys:
- coeff: np.array(size=(M, 6)), float representation of the coefficients
- coeff_scale: np.array(size=(M, 2)), scale all coefficients, not used here
- coeff_shift: np.array(size=(M, 2), dtype=int), amount to shift during computation
- y_scale: float, scale factor of the output, unused here
- y_shift: int, number of bits to shift the output for scaling
- n_bits: int, number of bits to represent the filter coefficients
Returns: tuple:
- a: np.array(size=(M+1, 3), dtype=int), quantized denominator coefficients
- a_shift: np.array(size=(M+1), dtype=int), amount to shift during computation
- b: np.array(size=(M+1, 3), dtype=int), quantized numerator coefficients
- b_shift: np.array(size=(M+1), dtype=int), amount to shift during computation
- y_shift: int, amount to shift the output
"""
quant_coeff = filter_dict["coeff"]
scale_coeff = filter_dict["coeff_scale"]
comp_shift = filter_dict["coeff_shift"]
output_shift = filter_dict["y_shift"]
M = quant_coeff.shape[0]
assert quant_coeff.shape == (M, 6)
assert scale_coeff.shape == (M, 2)
assert comp_shift.shape == (M, 2)
assert comp_shift.dtype == int
assert np.all(comp_shift <= 0)
# generate the coefficients
a = np.ones((M + 1, 3), dtype=int) << (n_bits - 1)
b = np.ones((M + 1, 3), dtype=int) << (n_bits - 1)
a_shift = np.ones((M + 1, ), dtype=int) * (n_bits - 1)
b_shift = np.ones((M + 1, ), dtype=int) * (n_bits - 1)
for m in range(M):
a[m + 1, :] = quantize_to_int(quant_coeff[m, 3:], scale_coeff[m, 1], n_bits)
b[m + 1, :] = quantize_to_int(quant_coeff[m, :3], scale_coeff[m, 0], n_bits)
a_shift[m + 1] = -comp_shift[m, 1]
b_shift[m + 1] = -comp_shift[m, 0]
return a, a_shift, b, b_shift, output_shift | a8e93302072733d77acb563cd758725f14c05420 | 20,110 |
import json
import traceback
def add_goods(request, openid, store_id, store_name, dsr,
specification, brand, favorable_rate, pic_path, live_recording_screen_path, daily_price, commission_rate,
pos_price, preferential_way, goods_url, hand_card,
storage_condition, shelf_life, unsuitable_people, ability_to_deliver, shipping_cycle, shipping_addresses,
delivery_company, not_shipping):
"""
:request method: POST
Store information
:param store_id: store ID (45 chars max)
:param store_name: store name (45 chars max)
:param dsr: store rating
Product information
:param goods_name: product name
:param specification: specification
:param brand: product brand
:param favorable_rate: positive-review rate
:param pic_path: product main-image links (list)
:param live_recording_screen_path: links to screen recordings of well-known livestream hosts selling the product
:param daily_price: regular price
:param live_price: livestream price
:param commission_rate: livestream commission rate
:param pos_price: slot-fee budget
:param preferential_way: livestream promotion mechanism
:param goods_url: product link
:param hand_card: livestream cue card
Cross-platform price comparison
:param tmall_price: Tmall price
:param taobao_price: Taobao price
:param jd_price: JD.com price
:param pdd_price: Pinduoduo price
:param offline_price: offline supermarket price
Storage and shipping
:param storage_condition: storage conditions
:param shelf_life: shelf life
:param unsuitable_people: unsuitable groups
:param ability_to_deliver: shipping capacity
:param shipping_cycle: shipping cycle
:param shipping_addresses: shipping address
:param delivery_company: logistics / courier company
:param not_shipping: regions not shipped to
:param free_shipping: free-shipping regions
Other
:param comment: remarks
:return:
{'code': ResponsCode.FAILED, 'data': '', "msg": 'failed to add the product'}
{'code': ResponsCode.SUCCESS, 'data': {"goods_id": pk}, "msg": 'product added successfully'}
{'code': ResponsCode.EXCEPTION, 'data': '', "msg": 'exception while adding the product'}
"""
rsp = {'code': ResponsCode.FAILED, 'data': '', "msg": 'failed to add the product'}
try:
_, data = get_store_data_by_store_id(openid, store_id)
if not data:
is_success = insert_store_info(store_id, store_name, dsr, openid, ignore=True)
if not is_success:
raise InvalidParameter('store does not exist and creating it failed')
is_success, pk = insert_goods_data(openid, json.loads(request.body))
if is_success:
rsp = {'code': ResponsCode.SUCCESS, 'data': {"goods_id": pk}, "msg": 'product added successfully'}
except InvalidParameter as e:
rsp = {'code': ResponsCode.FAILED, 'data': '', "msg": str(e)}
except:
logger.exception(traceback.format_exc())
rsp = {'code': ResponsCode.EXCEPTION, 'data': '', "msg": 'exception while adding the product'}
finally:
return rsp | e7a04a316e3fba3a803f20eb459dc8691ccc2642 | 20,111 |
def calcOneFeatureEa(dataSet: list, feature_idx: int):
"""
Compute the E(A) value of a single feature
:param dataSet: the dataset
:param feature_idx: the selected feature (specified here by its index 0, 1, 2, ...)
:return:
"""
attrs = getOneFeatureAttrs(dataSet, feature_idx)
# get the p, n counts for the whole dataset
p, n = getDatasetPN(dataSet)
ea = 0.0
for attr in attrs:
# get the p, n counts for each attribute value
attrP, attrN = getOneFeatureAttrPN(dataSet, feature_idx, attr)
# compute I(p, n) for this attribute value
attrIPN = calcIpn(attrP, attrN)
ea += (attrP+attrN)/(p+n) * attrIPN
return ea | fc800b285bc24246ad9c40070d33ff429e395183 | 20,115 |
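The snippet leans on helpers that are not shown (getOneFeatureAttrs, getDatasetPN, calcIpn). As a point of reference, a plausible definition of calcIpn, i.e. the standard ID3 information measure I(p, n), is sketched below; this is an assumption, not the original code.
import math

def calcIpn(p, n):
    """I(p, n) = -p/(p+n)*log2(p/(p+n)) - n/(p+n)*log2(n/(p+n)), with I = 0 for pure splits."""
    total = p + n
    if total == 0 or p == 0 or n == 0:
        return 0.0
    fp, fn = p / total, n / total
    return -fp * math.log2(fp) - fn * math.log2(fn)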
def translate_mapping(mapping: list, reference: SimpleNamespace, templ: bool=True, nontempl: bool=True,
correctframe: bool=True, filterframe: bool=True, filternonsense: bool=True):
"""
creates a protein mapping from a dna mapping.
:param mapping: a list/tuple of ops.
:param reference: the reference object to which the mapping is relative.
:param templ: include templated ops
:param nontempl: include nontemplated ops
:param correctframe: removes isolated ops that disrupt the frame
:param filterframe: don't return a mapping if there are remaining frameshifts.
:param filternonsense: don't return a mapping if contains a stop codon
:return:
"""
# create a mapping with the appropriate SNPs
base_mapping = []
if templ:
base_mapping.extend(templated(mapping, reference))
if nontempl:
base_mapping.extend(nontemplated(mapping, reference))
base_mapping.sort(key=lambda x: x[0])
# correct errors
if correctframe:
base_mapping = error_scrub(base_mapping)
# filter for whether it is in frame or not.
if filterframe and not len(transform(reference.seq, base_mapping)) % 3 == len(reference.seq) % 3:
return []
protein = translate(transform(reference.seq, base_mapping), offset=reference.offset)
if filternonsense and "_" in protein:
return []
protein_alns = align_proteins(reference.protein, protein)
return protein_alns | 3a05ae38d9bccb8b855c91af850a92426c5031f3 | 20,116 |
from copy import copy
from numpy import zeros, unique
from itertools import product
def trainModel(label,bestModel,obs,trainSet,testSet,modelgrid,cv,optMetric='auc'):
""" Train a message classification model """
pred = zeros(len(obs))
fullpred = zeros((len(obs),len(unique(obs))))
model = copy(bestModel.model)
#find the best model via tuning grid
for tune in [dict(zip(modelgrid, v)) for v in product(*modelgrid.values())]:
for k in tune.keys():
setattr(model,k,tune[k])
i = 0
for tr, vl in cv:
model.fit(trainSet.ix[tr].values,obs[tr])
pred[vl] = model.predict_proba(trainSet.ix[vl].values)[:,1]
fullpred[vl,:] = model.predict_proba(trainSet.ix[vl].values)
i += 1
bestModel.updateModel(pred,fullpred,obs,model,trainSet.columns.values,tune,optMetric=optMetric)
#re-train with all training data
bestModel.model.fit(trainSet.values,obs)
print(bestModel)
return {label: {'pred': pred, 'test_pred':bestModel.model.predict_proba(testSet)[:,1]}} | 8cea9f0044246972e80684fac584693a500198cc | 20,117 |
def get_device_state():
"""Return the device status."""
state_cmd = get_adb_command_line('get-state')
return execute_command(
state_cmd, timeout=RECOVERY_CMD_TIMEOUT, log_error=True) | e517e7df3f5a7a1bf3925a46ce6a780dbc862910 | 20,118 |
def character_state(combat, character):
"""
Get the combat status of a single character, as a tuple of
current_hp, max_hp, total healing
"""
max_hp = Max_hp(character.base_hp)
total_h = 0
for effect in StatusEffect.objects.filter(character=character, combat=combat, effect_typ__typ='MAX_HP'):
max_hp.hp += effect.effect_val
current_hp = Current_hp(max_hp.hp)
for wound in Wound.objects.filter(character=character, combat=combat):
current_hp.hp -= wound.amount
for heal in Heal.objects.filter(character=character, combat=combat):
current_hp.hp += heal.amount
total_h += heal.amount
return current_hp, max_hp, total_h | d80315934ac653d34dd73cc1a9861b9c6e2f2c9c | 20,119 |
def load_textfile(path) :
"""Returns text file as a str object
"""
f=open(path, 'r')
recs = f.read() # f.readlines()
f.close()
return recs | 8e12a93bb4918cbae7d7e9aad6f09f562eca0c16 | 20,120 |
import numpy as np
import scipy.interpolate
import mathx  # project-local helpers used below: vector_dim, reshape_vec
def interp1d_to_uniform(x, y, axis=None):
"""Resample array to uniformly sampled axis.
Has some limitations due to use of scipy interp1d.
Args:
x (vector): independent variable
y (array): dependent variable, must broadcast with x
axis (int): axis along which to resample
Returns:
xu: uniformly spaced independent variable
yu: dependent resampled at xu
"""
x = np.asarray(x)
y = np.asarray(y)
if axis is None:
axis = mathx.vector_dim(x)
num = x.shape[axis]
mn = x.min(axis, keepdims=True)
mx = x.max(axis, keepdims=True)
# Limitation of scipy interp1d
x = x.squeeze()
mn = mn.squeeze()
mx = mx.squeeze()
assert x.ndim == 1
xu = np.arange(num)/(num - 1)*(mx - mn) + mn
yu = scipy.interpolate.interp1d(x.squeeze(), y, axis=axis, bounds_error=False)(xu)
return mathx.reshape_vec(xu, axis), yu | 379071e0e0b718b4d4f8cc970a2b098cf3cab155 | 20,121 |
from typing import Dict
def flatten_dict(d: Dict):
"""Recursively flatten dictionaries, ordered by keys in ascending order"""
s = ""
for k in sorted(d.keys()):
if d[k] is not None:
if isinstance(d[k], dict):
s += f"{k}|{flatten_dict(d[k])}|"
else:
s += f"{k}|{d[k]}|"
return s | 26663b52ccda2a695aa2367cbaf324698a47d56a | 20,123 |
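A short example of the string the function above produces: keys are visited in sorted order, None values are dropped, and nested dicts are flattened in place.
out = flatten_dict({"b": 1, "a": {"y": 2, "x": None}})
assert out == "a|y|2||b|1|"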
def getPVvecs(fname):
"""
Generates an ensemble of day long PV activities, sampled 3 different
days for each complete pv data set
"""
datmat = np.zeros((18,48))
df = dd.read_csv(fname)
i = 0
for unique_value in df.Substation.unique():
ttemp, ptemp = PVgettimesandpower("2014-06", unique_value, fname)
t, p = trimandshift(ttemp, ptemp)
datmat[i,:] = np.array(p)
i += 1
ttemp, ptemp = PVgettimesandpower("2014-07", unique_value, fname)
t, p = trimandshift(ttemp, ptemp)
datmat[i,:] = np.array(p)
i += 1
ttemp, ptemp = PVgettimesandpower("2014-08", unique_value, fname)
t, p = trimandshift(ttemp, ptemp)
datmat[i,:] = np.array(p)
i += 1
return datmat | 322cf6d29d4104953678ec5e4dfbd5a82564ce1c | 20,124 |
def vis9(n): # DONE
"""
O OO OOO
OO OOO OOOO
OOO OOOO OOOOO
Number of Os:
6 9 12"""
result = 'O' * (n - 1) + 'O\n'
result += 'O' * (n - 1) + 'OO\n'
result += 'O' * (n - 1) + 'OOO\n'
return result | c06c9fdf5d71ef89ce83d5fc2136b9854f018988 | 20,125 |
def derivative_circ_dist(x, p):
"""
Derivative of circumferential distance and derivative function, w.r.t. p
d/dp d(x, p) = d/dp min_{z in [-1, 0, 1]} (|z + p - x|)
Args:
x (float): first angle
p (float): second angle
Returns:
float: d/dp d(x, p)
"""
# pylint: disable=chained-comparison,misplaced-comparison-constant
t = p - x
if t < -0.5 or (0 < t and t < 0.5):
return -1
if t > 0.5 or (-0.5 < t and t < 0):
return 1
return 0 | 36a4cc04cda32e8c6e5896d405f96068def8be41 | 20,126 |
def get_MB_compatible_list(OpClass, lhs, rhs):
""" return a list of metablock instance implementing an operation of
type OpClass and compatible with format descriptor @p lhs and @p rhs
"""
fct_map = {
Addition: get_Addition_MB_compatible_list,
Multiplication: get_Multiplication_MB_compatible_list
}
return fct_map[OpClass](lhs, rhs) | 172ca13f218f52e5834592fd09abf9444369d60c | 20,127 |
import torch
import random
def create_mock_target(number_of_nodes, number_of_classes):
"""
Creating a mock target vector.
"""
return torch.LongTensor([random.randint(0, number_of_classes-1) for node in range(number_of_nodes)]) | 1be4d86a0291d24f0be555d4eea7d29f0994db29 | 20,128 |
def is_iterable(obj):
"""
Return true if object has iterator but is not a string
:param object obj: Any object
:return: True if object is iterable but not a string.
:rtype: bool
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str) | c7a1353f7f62a567a65d0c4752976fefde6e1904 | 20,129 |
def get_operator_module(operator_string):
"""
Get module name
"""
# the module, for when the operator is not a local operator
operator_path = ".".join(operator_string.split(".")[:-1])
assert len(operator_path) != 0, (
"Please specify a format like 'package.operator' to specify your operator. You passed in '%s'"
% operator_string
)
return operator_path | 82b4ddc419b09b5874debbe64262b4a4f414cb8f | 20,131 |
def is_fraction(obj):
"""Test whether the object is a valid fraction.
"""
return isinstance(obj, Fraction) | ab0a1b11274f837f479fb62648a144f0e689b499 | 20,132 |
def getExtrusion(matrix):
"""calculates DXF-Extrusion = Arbitrary Xaxis and Zaxis vectors
"""
AZaxis = matrix[2].copy().resize3D().normalize() # = ArbitraryZvector
Extrusion = [AZaxis[0],AZaxis[1],AZaxis[2]]
if AZaxis[2]==1.0:
Extrusion = None
AXaxis = matrix[0].copy().resize3D() # = ArbitraryXvector
else:
threshold = 1.0 / 64.0
if abs(AZaxis[0]) < threshold and abs(AZaxis[1]) < threshold:
# AXaxis is the intersection WorldPlane and ExtrusionPlane
AXaxis = M_CrossVecs(WORLDY,AZaxis)
else:
AXaxis = M_CrossVecs(WORLDZ,AZaxis)
#print 'deb:\n' #-------------
#print 'deb:getExtrusion() Extrusion=', Extrusion #---------
return Extrusion, AXaxis.normalize() | ec6133bddc9093310ffe1e807ae24882aa24edc3 | 20,133 |
def _build_class_include(env, class_name):
"""
If parentns::classname is included and fabric
properties such as puppet_parentns__classname_prop = val1
are set, the class included in puppet will be something like
class { 'parentns::classname':
prop => 'val1',
}
"""
include_def = "class { '%s': \n" % class_name
property_prefix = _property_prefix(class_name)
for name, value in env.iteritems():
if name.startswith(property_prefix):
property_name = name[len(property_prefix):]
if not property_name.startswith("_"): # else subclass property
include_def += " %s => '%s',\n" % (property_name, value)
include_def += "\n}"
return include_def | f58633fefb3ca853ef292f554eea4f98126c3ecb | 20,134 |
async def mention_html(user_id, name):
"""
The function is designed to output a link to a telegram.
"""
return f'<a href="tg://user?id={user_id}">{escape(name)}</a>' | eed9dd188f36e4d23bb16e274382372c6464f890 | 20,135 |
from plasma.flex.messaging.messages import small
def blaze_loader(alias):
"""
Loader for BlazeDS framework compatibility classes, specifically
implementing ISmallMessage.
.. seealso:: `BlazeDS (external)
<http://opensource.adobe.com/wiki/display/blazeds/BlazeDS>`_
:since: 0.1
"""
if alias not in ['DSC', 'DSK', 'DSA']:
return
reload(small)
return pyamf.get_class_alias(alias) | 956acd6aa9c36c186081a43e271b6a3c61b7a53f | 20,136 |
def get_user_pic(user_id, table):
"""[summary]
Gets users profile picture
Args:
user_id ([int]): [User id]
table ([string]): [Table target]
Returns:
[string]: [Filename]
"""
try:
connection = database_cred()
cursor = connection.cursor()
cursor = connection.cursor(dictionary=True)
if table == "admin":
cursor.execute(
'SELECT admin_pic FROM admin WHERE admin_id=%s', (user_id,))
if table == "user":
cursor.execute(
'SELECT user_pic FROM user WHERE user_id=%s', (user_id,))
records = cursor.fetchall()
except Error as e:
print("parameterized query failed {}".format(e))
finally:
if connection.is_connected():
connection.close()
cursor.close()
return records | 28ea65c793e88b967889fa39dc8588e4afd75e91 | 20,137 |
def convert_file_format(files,size):
"""
Takes filename queue and returns an example from it
using the TF Reader structure
"""
filename_queue = tf.train.string_input_producer(files,shuffle=True)
image_reader = tf.WholeFileReader()
_,image_file = image_reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file)
image = tf.image.resize_images(image, [size,size])
image.set_shape((size,size,3))
return image | 0a889dbf8b851716f7a7788cee6cc1f7e7b4c0fc | 20,138 |
def validate_access_rule(supported_access_types, supported_access_levels,
access_rule, abort=False):
"""Validate an access rule.
:param access_rule: Access rules to be validated.
:param supported_access_types: List of access types that are regarded
valid.
:param supported_access_levels: List of access levels that are
regarded valid.
:param abort: a boolean value that indicates if an exception should
be raised whether the rule is invalid.
:return: Boolean.
"""
errmsg = _("Unsupported access rule of 'type' %(access_type)s, "
"'level' %(access_level)s, 'to' %(access_to)s: "
"%(field)s should be one of %(supported)s.")
access_param = access_rule.to_dict()
def validate(field, supported_tokens, excinfo):
if access_rule['access_%s' % field] in supported_tokens:
return True
access_param['field'] = field
access_param['supported'] = ', '.join(
"'%s'" % x for x in supported_tokens)
if abort:
LOG.error(errmsg, access_param)
raise excinfo['type'](
**{excinfo['about']: excinfo['details'] % access_param})
else:
LOG.warning(errmsg, access_param)
return False
valid = True
valid &= validate(
'type', supported_access_types,
{'type': exception.InvalidShareAccess, 'about': "reason",
'details': _(
"%(access_type)s; only %(supported)s access type is allowed")})
valid &= validate(
'level', supported_access_levels,
{'type': exception.InvalidShareAccessLevel, 'about': "level",
'details': "%(access_level)s"})
return valid | 2ce7ba446ec583b5b46dbd6a8eceeafe6cc46a6e | 20,139 |
def deduplicate(inp: SHAPE) -> SHAPE:
"""
Remove duplicates from any iterable while retaining the order of elements.
:param inp: iterable to deduplicate
:return: new, unique iterable of same type as input
"""
return type(inp)(dict.fromkeys(list(inp))) | d80ad3e00ce0bfa9a0625308267c5e25d8e3f3c9 | 20,141 |
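Usage sketch: order is preserved and the result is rebuilt with the input's own type (this relies on dicts preserving insertion order, i.e. Python 3.7+).
assert deduplicate([3, 1, 3, 2, 1]) == [3, 1, 2]
assert deduplicate((1, 1, 2)) == (1, 2)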
def access_rules_synchronized(f):
"""Decorator for synchronizing share access rule modification methods."""
def wrapped_func(self, *args, **kwargs):
# The first argument is always a share, which has an ID
key = "share-access-%s" % args[0]['id']
@utils.synchronized(key)
def source_func(self, *args, **kwargs):
return f(self, *args, **kwargs)
return source_func(self, *args, **kwargs)
return wrapped_func | 03fe6b1905d825de1f20ed2967eb003f96fb2cce | 20,142 |
from importlib import import_module
def import_python(path, package=None):
"""Get python module or object.
Parameters
----------
path : str
Fully-qualified python path, i.e. `package.module:object`.
package : str or None
Package name to use as an anchor if `path` is relative.
"""
parts = path.split(':')
if len(parts) > 2:
msg = f"Not a correct path ('{path}' has more than one object qualifier)"
raise ValueError(msg)
if len(parts) == 2:
module_path, obj = parts
else:
module_path, obj = path, None
module = import_module(module_path, package=package)
if obj:
return getattr(module, obj)
return module | ff2755964c0c24c5366e3243a1b2997176b33a4c | 20,143 |
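Usage sketch for the helper above:
join = import_python("os.path:join")     # "module.path:attribute" form returns the attribute
assert join("a", "b") == __import__("os").path.join("a", "b")

json_module = import_python("json")      # plain module form returns the module itself
assert hasattr(json_module, "loads")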
from typing import Callable
from typing import Awaitable
async def feature_flags_scope_per_request(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
"""Use new feature flags copy for each request."""
# Create new copy of the feature flags, as we'll be modifying them later
# and do not want to change our system-wide feature flags.
with ff_ctx as feature_flags:
# FastAPI provides its own dependency injection mechanism, but just
# in case you are using starlette directly or there any other pure
# ASGI middlewares.
request.scope["feature_flags"] = feature_flags
return await call_next(request) | 9169a2f66f7fa60066695cfef5a320eedd566145 | 20,144 |
def get_scenes_need_processing(config_file, sensors):
"""
A function which finds all the processing steps for all the scenes which haven't yet been undertaken.
This is per scene processing rather than per step processing in the functions above.
Steps include:
* Download
* ARD Production
* Generating Tile Cache
* Generating Quicklook images
:param config_file: The EODataDown configuration file path.
:param sensors: list of sensor string names to be processed.
:returns: a list of lists where each scn has [config_file, scn_sensor, scn_id]
"""
sys_main_obj = eodatadown.eodatadownsystemmain.EODataDownSystemMain()
sys_main_obj.parse_config(config_file)
tasks = []
for sensor in sensors:
sensor_obj = sys_main_obj.get_sensor_obj(sensor)
scn_ids = []
if sensor_obj.calc_scn_usr_analysis():
scns = sensor_obj.get_scnlist_usr_analysis()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
if sensor_obj.calc_scn_tilecache():
scns = sensor_obj.get_scnlist_quicklook()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
if sensor_obj.calc_scn_quicklook():
scns = sensor_obj.get_scnlist_tilecache()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
scns = sensor_obj.get_scnlist_con2ard()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
scns = sensor_obj.get_scnlist_download()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
return tasks | a600cd352980184ebe8382a5cabf9d8b09d9f688 | 20,146 |
def startingStateDistribution(env, N=100000):
"""
This function samples initial states for the environment and computes
an empirical estimator for the starting distribution mu_0
"""
rdInit = []
sample = {}
# Computing the starting state distribution
mu_0 = np.zeros((env.n_states,1))
for i in range(N):
rdInit.append(env.reset())
for i in range(0, env.n_states):
sample[i] = rdInit.count(i)
mu_0[i] = sample[i]/N
return mu_0 | 2685ebe6315a085ffdabbb82786499191c33d957 | 20,147 |
def get_changepoint_values_from_config(
changepoints_dict,
time_features_df,
time_col=cst.TIME_COL):
"""Applies the changepoint method specified in `changepoints_dict` to return the changepoint values
:param changepoints_dict: Optional[Dict[str, any]]
Specifies the changepoint configuration.
"method": str
The method to locate changepoints. Valid options:
"uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
"custom". Places changepoints at the specified dates.
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": Optional[str]
Column to apply `growth_func` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying `growth_func` to "continuous_time_col" with offsets.
If None, uses identity function to use `continuous_time_col` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
"n_changepoints": int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
"dates": Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
:param time_features_df: pd.Dataframe
training dataset. contains column "continuous_time_col"
:param time_col: str
The column name in `time_features_df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
Used only in the "custom" method.
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
changepoint_values = None
if changepoints_dict is not None:
valid_changepoint_methods = ["uniform", "custom"]
changepoint_method = changepoints_dict.get("method")
continuous_time_col = changepoints_dict.get("continuous_time_col")
if changepoint_method is None:
raise Exception("changepoint method must be specified")
if changepoint_method not in valid_changepoint_methods:
raise NotImplementedError(
f"changepoint method {changepoint_method} not recognized. "
f"Must be one of {valid_changepoint_methods}")
if changepoint_method == "uniform":
if changepoints_dict["n_changepoints"] > 0:
params = {"continuous_time_col": continuous_time_col} if continuous_time_col is not None else {}
changepoint_values = get_evenly_spaced_changepoints_values(
df=time_features_df,
n_changepoints=changepoints_dict["n_changepoints"],
**params)
elif changepoint_method == "custom":
params = {}
if time_col is not None:
params["time_col"] = time_col
if continuous_time_col is not None:
params["continuous_time_col"] = continuous_time_col
changepoint_values = get_custom_changepoints_values(
df=time_features_df,
changepoint_dates=changepoints_dict["dates"],
**params)
return changepoint_values | 0c38283e5744f180fbd326a549a4ee37b461c213 | 20,149 |
def jitChol(A, maxTries=10, warning=True):
"""Do a Cholesky decomposition with jitter.
Description:
U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky
decomposition on the given matrix, if matrix isn't positive
definite the function adds 'jitter' and tries again. Thereafter
the amount of jitter is multiplied by 10 each time it is added
again. This is continued for a maximum of 10 times. The amount of
jitter added is returned.
Returns:
U - the Cholesky decomposition for the matrix.
jitter - the amount of jitter that was added to the matrix.
Arguments:
A - the matrix for which the Cholesky decomposition is required.
maxTries - the maximum number of times that jitter is added before
giving up (default 10).
warning - whether to give a warning for adding jitter (default is True)
See also
CHOL, PDINV, LOGDET
Copyright (c) 2005, 2006 Neil D. Lawrence
"""
jitter = 0
i = 0
while(True):
try:
# Try --- need to check A is positive definite
if jitter == 0:
jitter = abs(SP.trace(A))/A.shape[0]*1e-6
LC = linalg.cholesky(A, lower=True)
return LC.T, 0.0
else:
if warning:
# pdb.set_trace()
# plt.figure()
# plt.imshow(A, interpolation="nearest")
# plt.colorbar()
# plt.show()
logging.error("Adding jitter of %f in jitChol()." % jitter)
LC = linalg.cholesky(A+jitter*SP.eye(A.shape[0]), lower=True)
return LC.T, jitter
except linalg.LinAlgError:
# Seems to have been non-positive definite.
if i<maxTries:
jitter = jitter*10
else:
raise linalg.LinAlgError("Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials.")
i += 1
return LC | ac2cbc35a3a0c33208456765512893554d91f75c | 20,150 |
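Usage sketch, assuming the surrounding module already imports `scipy as SP` and `scipy.linalg as linalg` (which is what the calls suggest); for an already positive-definite matrix no jitter is added.
import numpy as np

A = np.array([[4.0, 2.0],
              [2.0, 3.0]])
U, jitter = jitChol(A)
assert jitter == 0.0
assert np.allclose(U.T @ U, A)   # U is the upper-triangular Cholesky factor of A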
import requests
def stock_individual_info_em(symbol: str = "603777") -> pd.DataFrame:
"""
Eastmoney - individual stock - basic stock information
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock ticker code
:type symbol: str
:return: stock information
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "http://push2.eastmoney.com/api/qt/stock/get"
params = {
'ut': 'fa5fd1943c7b386f172d6893dbfba10b',
'fltt': '2',
'invt': '2',
'fields': 'f120,f121,f122,f174,f175,f59,f163,f43,f57,f58,f169,f170,f46,f44,f51,f168,f47,f164,f116,f60,f45,f52,f50,f48,f167,f117,f71,f161,f49,f530,f135,f136,f137,f138,f139,f141,f142,f144,f145,f147,f148,f140,f143,f146,f149,f55,f62,f162,f92,f173,f104,f105,f84,f85,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f107,f111,f86,f177,f78,f110,f262,f263,f264,f267,f268,f255,f256,f257,f258,f127,f199,f128,f198,f259,f260,f261,f171,f277,f278,f279,f288,f152,f250,f251,f252,f253,f254,f269,f270,f271,f272,f273,f274,f275,f276,f265,f266,f289,f290,f286,f285,f292,f293,f294,f295',
"secid": f"{code_id_dict[symbol]}.{symbol}",
'_': '1640157544804',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.reset_index(inplace=True)
del temp_df['rc']
del temp_df['rt']
del temp_df['svr']
del temp_df['lt']
del temp_df['full']
code_name_map = {
'f57': '股票代码',
'f58': '股票简称',
'f84': '总股本',
'f85': '流通股',
'f127': '行业',
'f116': '总市值',
'f117': '流通市值',
'f189': '上市时间',
}
temp_df['index'] = temp_df['index'].map(code_name_map)
temp_df = temp_df[pd.notna(temp_df['index'])]
if 'dlmkts' in temp_df.columns:
del temp_df['dlmkts']
temp_df.columns = [
'item',
'value',
]
temp_df.reset_index(inplace=True, drop=True)
return temp_df | 6d04941cb1aeaed49450eff61e957aab26bbf21a | 20,151 |
def InverseDynamicsTool_safeDownCast(obj):
"""
InverseDynamicsTool_safeDownCast(OpenSimObject obj) -> InverseDynamicsTool
Parameters
----------
obj: OpenSim::Object *
"""
return _tools.InverseDynamicsTool_safeDownCast(obj) | 3060244716c54e10953df5aa8db1c55076a040a2 | 20,152 |
def build_decoder(encoding_dim,sparse):
""""build and return the decoder linked with the encoder"""
input_img = Input(shape=(28*28,))
encoder = build_encoder(encoding_dim,sparse)
input_encoded = encoder(input_img)
decoded = Dense(64, activation='relu')(input_encoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(28*28,activation='relu')(decoded)
decoder = Model(input_img,decoded)
return decoder | 207535e38fd45e7ea6e0143c34607213747328ba | 20,154 |
def find_usable_exits(room, stuff):
"""
Given a room, and the player's stuff, find a list of exits that they can use right now.
That means the exits must not be hidden, and if they require a key, the player has it.
RETURNS
- a list of exits that are visible (not hidden) and don't require a key!
"""
usable = []
missing_key = []
for exit in room['exits']:
if exit.get("hidden", False):
continue
if "required_key" in exit:
if exit["required_key"] in stuff:
usable.append(exit)
continue
else:
missing_key.append(exit)
continue
usable.append(exit)
return usable, missing_key | 529bacbf33b5680774b291782fdcefe650cafeca | 20,155 |
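A small illustrative room dict (the exit fields below are assumptions based only on how the function reads them):
room = {"exits": [
    {"name": "north door"},
    {"name": "vault", "required_key": "gold key"},
    {"name": "trapdoor", "hidden": True},
]}
usable, missing_key = find_usable_exits(room, stuff=["lantern"])
# "north door" is usable, the locked "vault" lands in missing_key,
# and the hidden "trapdoor" is skipped entirely.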
def get_normal_map(x, area_weighted=False):
"""
x: [bs, h, w, 3] (x,y,z) -> (nx,ny,nz)
"""
nn = 6
p11 = x
p = tf.pad(x, tf.constant([[0,0], [1,1], [1,1], [0,0]]))
p11 = p[:, 1:-1, 1:-1, :]
p10 = p[:, 1:-1, 0:-2, :]
p01 = p[:, 0:-2, 1:-1, :]
p02 = p[:, 0:-2, 2:, :]
p12 = p[:, 1:-1, 2:, :]
p20 = p[:, 2:, 0:-2, :]
p21 = p[:, 2:, 1:-1, :]
pos = [p10, p01, p02, p12, p21, p20]
for i in range(nn):
pos[i] = tf.subtract(pos[i], p11)
normals = []
for i in range(1, nn):
normals.append(tf.cross(pos[i%nn], pos[(i-1+nn)%nn]))
normal = tf.reduce_sum(tf.stack(normals), axis=0)
if not area_weighted:
normal = tf.nn.l2_normalize(normal, 3)
normal = tf.where(tf.is_nan(normal),
tf.zeros_like(normal), normal)
return normal | 1b087113d6bc68a24195459ece006c7a74848a63 | 20,156 |
def _ros_group_rank(df, dl_idx, censorship):
"""
Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : pandas.DataFrame
dl_idx : str
Name of the column in the dataframe the index of the
observations' corresponding detection limit in the `cohn`
dataframe.
censorship : str
Name of the column in the dataframe that indicates that a
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
ranks : numpy.array
Array of ranks for the dataset.
"""
# (editted for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
ranks = df.copy()
ranks.loc[:, 'rank'] = 1
ranks = (
ranks.groupby(by=[dl_idx, censorship])['rank']
.transform(lambda g: g.cumsum())
)
return ranks | f4495eb57d158745603899086e59643edec1e489 | 20,157 |
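Usage sketch with hypothetical column names (`det_limit_index`, `censored`); ranks restart within each (detection-limit, censorship) group.
import pandas as pd

df = pd.DataFrame({
    "det_limit_index": [0, 0, 0, 1, 1],
    "censored":        [True, True, False, False, False],
})
ranks = _ros_group_rank(df, "det_limit_index", "censored")
assert ranks.tolist() == [1, 2, 1, 1, 2]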
def f_all(predicate, iterable):
"""Return whether predicate(i) is True for all i in iterable
>>> is_odd = lambda num: (num % 2 == 1)
>>> f_all(is_odd, [])
True
>>> f_all(is_odd, [1, 3, 5, 7, 9])
True
>>> f_all(is_odd, [2, 1, 3, 5, 7, 9])
False
"""
return all(predicate(i) for i in iterable) | c0a0e52587a7afc9da143ac936aab87ad531b455 | 20,158 |
from typing import List
from typing import Tuple
from typing import Set
from typing import Dict
def _recursive_replace(data):
"""Searches data structure and replaces 'nan' and 'inf' with respective float values"""
if isinstance(data, str):
if data == "nan":
return float("nan")
if data == "inf":
return float("inf")
if isinstance(data, List):
return [_recursive_replace(v) for v in data]
if isinstance(data, Tuple):
return tuple([_recursive_replace(v) for v in data])
if isinstance(data, Set):
return set([_recursive_replace(v) for v in data])
if isinstance(data, Dict):
return {k: _recursive_replace(v) for k, v in data.items()}
return data | b5c21d806b462070b2d1eec7d91a5dc700f6b0ed | 20,159 |
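Example of the replacement in action; math.isnan is needed for the check because NaN never compares equal to itself.
import math

out = _recursive_replace({"a": "nan", "b": ["inf", "ok", 3]})
assert math.isnan(out["a"])
assert out["b"][0] == float("inf") and out["b"][1:] == ["ok", 3]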
def trans_text_ch_to_vector(txt_file, word_num_map, txt_label=None):
""" Trans chinese chars to vector
:param txt_file:
:param word_num_map:
:param txt_label:
:return:
"""
words_size = len(word_num_map)
to_num = lambda word: word_num_map.get(word.encode('utf-8'), words_size)
if txt_file != None:
txt_label = get_ch_lable(txt_file)
labels_vector = list(map(to_num, txt_label))
return labels_vector | 83370ca18303e1286b099d646362db14cd4b5dbd | 20,160 |
def adjust_bag(request, item_id):
""" Adjust the quantity of a product to the specified amount"""
quantity = int('0'+request.POST.get('quantity'))
bag = request.session.get('bag', {})
if quantity > 0:
bag[item_id] = quantity
else:
messages.error(request, 'Value must be greater than or equal to 1.\
If you do not need this product, click on the Remove button.')
request.session['bag'] = bag
return redirect(reverse('view_bag')) | a2814adcffbc04ee02b18bd14fc7daf0dbe58677 | 20,161 |
def _condexpr_value(e):
"""Evaluate the value of the input expression.
"""
assert type(e) == tuple
assert len(e) in [2, 3]
if len(e) == 3:
if e[0] in ARITH_SET:
return _expr_value(e)
left = _condexpr_value(e[1])
right = _condexpr_value(e[2])
if type(left) != type(right):
# Boolean result expected
return False
elif e[0] == 'and':
return left and right
elif e[0] == 'or':
return left or right
elif e[0] == '=':
return left == right
elif e[0] == '!=':
return left != right
elif e[0] == '>':
return left > right
elif e[0] == '>=':
return left >= right
elif e[0] == '<':
return left < right
elif e[0] == '<=':
return left <= right
elif e[0] == 'not':
return not _condexpr_value(e[1])
elif e[0] in ['string', 'number', 'boolean']:
return e[1]
elif e[0] == 'identifier':
return get_config(e[1])['value']
raise Exception("Unexpected depend list: " + str(e)) | 3973a22b5c5553c2c1b70b94f97be4d54f224766 | 20,163 |
import socket
def in6_isincluded(addr, prefix, plen):
"""
Returns True when 'addr' belongs to prefix/plen. False otherwise.
"""
temp = inet_pton(socket.AF_INET6, addr)
pref = in6_cidr2mask(plen)
zero = inet_pton(socket.AF_INET6, prefix)
return zero == in6_and(temp, pref) | 4003d9b61ddb8f37207a2d332a31e4ee3a97cad7 | 20,164 |
def vis_channel(model, layer, channel_n):
"""
This function creates a visualization for a single channel in a layer
:param model: model we are visualizing
:type model: lucid.modelzoo
:param layer: the name of the layer we are visualizing
:type layer: string
:param channel_n: The channel number in the layer we are optimizing for
:type channel_n: int
:return: array of pixel values for the visualization
"""
print('Getting vis for ' + layer + ', channel ' + str(channel_n))
l_name = dla_lucid.LAYERS[layer][0]
obj = objectives.channel(l_name, channel_n)
imgs = render.render_vis(model, obj, dla_lucid.PARAM_1D,
thresholds=dla_lucid.THRESH_1D, transforms=dla_lucid.TFORMS_1D, verbose=False)
imgs_array = np.array(imgs)
imgs_reshaped = imgs_array.reshape(400)
return imgs_reshaped | b6f1b72be81fa317fc59b3582b9f43afb640a4d6 | 20,165 |
from typing import Tuple
import time
def processing(log: EventLog, causal: Tuple[str, str], follows: Tuple[str, str]):
"""
Applying the Alpha Miner with the new relations
Parameters
-------------
log
Filtered log
causal
Pairs that have a causal relation (->)
follows
Pairs that have a follow relation (>)
Returns
-------------
net
Petri net
im
Initial marking
fm
Final marking
"""
# create list of all events
labels = set()
start_activities = set()
end_activities = set()
for trace in log:
start_activities.add(trace.__getitem__(0))
end_activities.add(trace.__getitem__(len(trace) - 1))
for events in trace:
labels.add(events)
labels = list(labels)
pairs = []
for key, element in causal.items():
for item in element:
if get_sharp_relation(follows, key, key):
if get_sharp_relation(follows, item, item):
pairs.append(({key}, {item}))
# combining pairs
for i in range(0, len(pairs)):
t1 = pairs[i]
for j in range(i, len(pairs)):
t2 = pairs[j]
if t1 != t2:
if t1[0].issubset(t2[0]) or t1[1].issubset(t2[1]):
if get_sharp_relations_for_sets(follows, t1[0], t2[0]) and get_sharp_relations_for_sets(follows,
t1[1],
t2[1]):
new_alpha_pair = (t1[0] | t2[0], t1[1] | t2[1])
if new_alpha_pair not in pairs:
pairs.append((t1[0] | t2[0], t1[1] | t2[1]))
# maximize pairs
cleaned_pairs = list(filter(lambda p: __pair_maximizer(pairs, p), pairs))
# create transitions
net = PetriNet('alpha_plus_net_' + str(time.time()))
label_transition_dict = {}
for label in labels:
if label != 'artificial_start' and label != 'artificial_end':
label_transition_dict[label] = PetriNet.Transition(label, label)
net.transitions.add(label_transition_dict[label])
else:
label_transition_dict[label] = PetriNet.Transition(label, None)
net.transitions.add(label_transition_dict[label])
# and source and sink
src = add_source(net, start_activities, label_transition_dict)
sink = add_sink(net, end_activities, label_transition_dict)
# create places
for pair in cleaned_pairs:
place = PetriNet.Place(str(pair))
net.places.add(place)
for in_arc in pair[0]:
add_arc_from_to(label_transition_dict[in_arc], place, net)
for out_arc in pair[1]:
add_arc_from_to(place, label_transition_dict[out_arc], net)
return net, Marking({src: 1}), Marking({sink: 1}), cleaned_pairs | 5841c82462432edddddf1b0dfd965b1043bc7277 | 20,166 |
from typing import List
import re
def word_tokenize(string: str, language: str = "english") -> List[str]:
"""tokenizes a given string into a list of substrings.
:param string: String to tokenize.
:param language: Language. Either one of ``english'' or ``german''.
"""
if language not in ["english", "german"]:
raise ValueError("language argument has to be either ``english'' or ``german''")
# excessive whitespaces
string = re.sub(r"\s+", " ", string)
# some unicode characters
string = string.replace("’", "'")
string = string.replace("”", '"')
string = string.replace("“", '"')
# floating point (e.g., 1.3 => 1.3)
string = re.sub(r"(\d+)\.(\d+)", r"\g<1>._\g<2>", string)
# percentage (e.g., below.500 => below .500)
string = re.sub(r"(\w+)\.(\d+)", r"\g<1> ._\g<2>", string)
# end of quote
string = string.replace(".``", ". ``")
# number with apostrophe (e.g. '90)
string = re.sub(r"\s'(\d+)", r"' \g<1>", string)
# names with Initials (e.g. C. J. Miles)
string = re.sub(r"(^|\s)(\w)\. (\w)\.", r"\g<1>\g<2>._ \g<3>._", string)
# some dots
string = string.replace("..", " ..")
# names with apostrophe => expands temporarily
string = re.sub(r"\w+'(?!d|s|ll|t|re|ve|\s)", r"\g<0>_", string)
# win-loss scores (German notation seems to be XX:YY, but this is also the time format,
# and the times are not tokenized in the original RotoWire. So we manually handle XX:YY
# expression.
string = re.sub(r"(\d+)-(\d+)", r"\g<1> - \g<2>", string)
string = re.sub(r"(\d+)-of-(\d+)", r"\g<1> - of - \g<2>", string)
# actual tokenization
tokenized = nltk.word_tokenize(string, language=language)
joined = " ".join(tokenized)
# shrink expanded name-with-apostrophe expressions
joined = joined.replace("'_", "'")
# shrink expanded name-with-initial expressions
joined = joined.replace("._", ".")
tokenized = joined.split(" ")
return tokenized | 00cb30031fc5a9e7ddbfcffeae9fad031f463cb3 | 20,167 |
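A minimal usage sketch; it assumes NLTK and its "punkt" tokenizer data are installed, and the sentence is made up.
word_tokenize("The Spurs won 104-98 in overtime.")
# -> ['The', 'Spurs', 'won', '104', '-', '98', 'in', 'overtime', '.']
# (the score is split around the hyphen by the pre-tokenization rules above)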
import numpy as np
import torch
# get_angle and get_dihedral are assumed to be geometry helpers defined elsewhere in this module
def modify_scaffolds_with_coords(scaffolds, coords):
""" Gets scaffolds and fills in the right data.
Inputs:
* scaffolds: dict. as returned by `build_scaffolds_from_scn_angles`
* coords: (L, 14, 3). sidechainnet tensor. same device as scaffolds
Outputs: corrected scaffolds
"""
# calculate distances and update:
# N, CA, C
scaffolds["bond_mask"][1:, 0] = torch.norm(coords[1:, 0] - coords[:-1, 2], dim=-1) # N
scaffolds["bond_mask"][ :, 1] = torch.norm(coords[ :, 1] - coords[: , 0], dim=-1) # CA
scaffolds["bond_mask"][ :, 2] = torch.norm(coords[ :, 2] - coords[: , 1], dim=-1) # C
# O, CB, side chain
selector = np.arange(len(coords))
for i in range(3, 14):
# get indexes
idx_a, idx_b, idx_c = scaffolds["point_ref_mask"][:, :, i-3] # (3, L, 11) -> 3 * (L, 11)
# correct distances
scaffolds["bond_mask"][:, i] = torch.norm(coords[:, i] - coords[selector, idx_c], dim=-1)
# get angles
scaffolds["angles_mask"][0, :, i] = get_angle(coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# handle C-beta, where the C requested is from the previous aa
if i == 4:
# for 1st residue, use position of the second residue's N
first_next_n = coords[1, :1] # 1, 3
# the c requested is from the previous residue
main_c_prev_idxs = coords[selector[:-1], idx_a[1:]]# (L-1), 3
# concat
coords_a = torch.cat([first_next_n, main_c_prev_idxs])
else:
coords_a = coords[selector, idx_a]
# get dihedrals
scaffolds["angles_mask"][1, :, i] = get_dihedral(coords_a,
coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# correct angles and dihedrals for backbone
scaffolds["angles_mask"][0, :-1, 0] = get_angle(coords[:-1, 1], coords[:-1, 2], coords[1: , 0]) # ca_c_n
scaffolds["angles_mask"][0, 1:, 1] = get_angle(coords[:-1, 2], coords[1:, 0], coords[1: , 1]) # c_n_ca
scaffolds["angles_mask"][0, :, 2] = get_angle(coords[:, 0], coords[ :, 1], coords[ : , 2]) # n_ca_c
# N determined by previous psi = f(n, ca, c, n+1)
scaffolds["angles_mask"][1, :-1, 0] = get_dihedral(coords[:-1, 0], coords[:-1, 1], coords[:-1, 2], coords[1:, 0])
# CA determined by omega = f(ca, c, n+1, ca+1)
scaffolds["angles_mask"][1, 1:, 1] = get_dihedral(coords[:-1, 1], coords[:-1, 2], coords[1:, 0], coords[1:, 1])
# C determined by phi = f(c-1, n, ca, c)
scaffolds["angles_mask"][1, 1:, 2] = get_dihedral(coords[:-1, 2], coords[1:, 0], coords[1:, 1], coords[1:, 2])
return scaffolds | 6d0853c3749fbf251cb3147109dab8951603c99c | 20,169 |
import pickle
from .stem import _classification_textcleaning_stemmer
def multinomial(**kwargs):
"""
Load multinomial toxicity model.
Parameters
----------
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
BAYES : malaya._models._sklearn_model.MULTILABEL_BAYES class
"""
check_file(
PATH_TOXIC['multinomial'], S3_PATH_TOXIC['multinomial'], **kwargs
)
try:
with open(PATH_TOXIC['multinomial']['model'], 'rb') as fopen:
multinomial = pickle.load(fopen)
with open(PATH_TOXIC['multinomial']['vector'], 'rb') as fopen:
vectorize = pickle.load(fopen)
    except Exception:
        raise Exception(
            "model corrupted for some reason, please run malaya.clear_cache('toxic/multinomial') and try again"
        )
return MULTILABEL_BAYES(
models = multinomial,
vectors = vectorize,
cleaning = _classification_textcleaning_stemmer,
) | 78bb3ceffefd6b38c304758eda8c0bafe36462ab | 20,170 |
import logging
import boto3
from botocore.exceptions import ClientError
def create_bucket(bucket_name, region="us-west-2"):
"""Create an S3 bucket in a specified region
:param bucket_name: Bucket to create
:param region: String region to create bucket in, e.g., 'us-west-2'
    :return: the boto3 S3 client if the bucket exists or was created, otherwise None
"""
# Create bucket
try:
# get list of existing buckets
s3_client = boto3.client('s3', region_name=region)
list_buckets = s3_client.list_buckets()
for bucket in list_buckets['Buckets']:
if bucket["Name"] == bucket_name:
print("------- Bucket already exists")
return s3_client
location = {'LocationConstraint': region}
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration=location)
return s3_client
except ClientError as e:
logging.error(e)
return | c2d655982563c233a027dc94f9b73e8899aeddb7 | 20,171 |
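A hedged usage sketch: it requires valid AWS credentials in the environment, and the bucket name below is a placeholder.
client = create_bucket("my-example-bucket", region="us-west-2")
if client is not None:
    print("bucket exists or was created")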
def create_client():
"""Return a client socket that may be connected to a remote address."""
return _new_sock() | 5d0515c731d4c087c7b118923aa579d4bcd1e881 | 20,172 |
import warnings
import copy
# `abstract` and `evaluators` are assumed to be the geomdl submodules imported elsewhere in this module
def derivative_surface(obj):
""" Computes the hodograph (first derivative) surface of the input surface.
This function constructs the hodograph (first derivative) surface from the input surface by computing the degrees,
knot vectors and the control points of the derivative surface.
The return value of this function is a tuple containing the following derivative surfaces in the given order:
* U-derivative surface (derivative taken only on the u-direction)
* V-derivative surface (derivative taken only on the v-direction)
* UV-derivative surface (derivative taken on both the u- and the v-direction)
:param obj: input surface
:type obj: abstract.Surface
:return: derivative surfaces w.r.t. u, v and both u-v
:rtype: tuple
"""
if not isinstance(obj, abstract.Surface):
raise TypeError("Input shape must be an instance of abstract.Surface class")
if obj.rational:
warnings.warn("Cannot compute hodograph surface for a rational surface")
return obj
# Find the control points of the derivative surface
d = 2 # 0 <= k + l <= d, see pg. 114 of The NURBS Book, 2nd Ed.
pkl = evaluators.SurfaceEvaluator2.derivatives_ctrlpts(r1=0, r2=obj.ctrlpts_size_u - 1,
s1=0, s2=obj.ctrlpts_size_v - 1,
degree_u=obj.degree_u, degree_v=obj.degree_v,
ctrlpts_size_u=obj.ctrlpts_size_u,
ctrlpts_size_v=obj.ctrlpts_size_v,
knotvector_u=obj.knotvector_u, knotvector_v=obj.knotvector_v,
ctrlpts=obj.ctrlpts2d,
dimension=obj.dimension,
deriv_order=d)
ctrlpts2d_u = []
for i in range(0, len(pkl[1][0]) - 1):
ctrlpts2d_u.append(pkl[1][0][i])
surf_u = copy.deepcopy(obj)
surf_u.degree_u = obj.degree_u - 1
surf_u.ctrlpts2d = ctrlpts2d_u
surf_u.knotvector_u = obj.knotvector_u[1:-1]
surf_u.delta = obj.delta
ctrlpts2d_v = []
for i in range(0, len(pkl[0][1])):
ctrlpts2d_v.append(pkl[0][1][i][0:-1])
surf_v = copy.deepcopy(obj)
surf_v.degree_v = obj.degree_v - 1
surf_v.ctrlpts2d = ctrlpts2d_v
surf_v.knotvector_v = obj.knotvector_v[1:-1]
surf_v.delta = obj.delta
ctrlpts2d_uv = []
for i in range(0, len(pkl[1][1]) - 1):
ctrlpts2d_uv.append(pkl[1][1][i][0:-1])
# Generate the derivative curve
surf_uv = obj.__class__()
surf_uv.degree_u = obj.degree_u - 1
surf_uv.degree_v = obj.degree_v - 1
surf_uv.ctrlpts2d = ctrlpts2d_uv
surf_uv.knotvector_u = obj.knotvector_u[1:-1]
surf_uv.knotvector_v = obj.knotvector_v[1:-1]
surf_uv.delta = obj.delta
return surf_u, surf_v, surf_uv | f9b846c0b2b17e315ae4b98138719361675df557 | 20,173 |
def configure(config):
"""
| [bing ] | example | purpose |
| -------- | ------- | ------- |
| api_key | VBsdaiY23sdcxuNG1gP+YBsCwJxzjfHgdsXJG5 | Bing Primary Account Key |
"""
chunk = ''
if config.option('Configuring bing search module', False):
config.interactive_add('bing', 'api_key', 'Bing Primary Account Key', '')
return chunk | 87ccd4694cfbf34d24e6e31f2b485aaa465ba68b | 20,174 |
def CVRMSE(ip1,ip2):
""" The normalized RMSE (= Root Mean Square Error) is defined as CVRMSE(X,Y) = sqrt[ sum_i(Yi-Xi)^2 / N ] / mean(Yi) ) """
stats = ip1.getStatistics()
return RMSE(ip1,ip2) / stats.mean | 0981637da92d2a60c6281f216587fa5bc798d554 | 20,175 |
def get_verified_aid_pairs(ibs):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from wbia_cnn._plugin import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('NNP_Master3', allow_newdir=True)
>>> verified_aid1_list, verified_aid2_list = get_verified_aid_pairs(ibs)
"""
# Grab marked hard cases
am_rowids = ibs._get_all_annotmatch_rowids()
remove_photobombs = True
if remove_photobombs:
flags = ibs.get_annotmatch_is_photobomb(am_rowids)
am_rowids = ut.filterfalse_items(am_rowids, flags)
verified_aid1_list = ibs.get_annotmatch_aid1(am_rowids)
verified_aid2_list = ibs.get_annotmatch_aid2(am_rowids)
return verified_aid1_list, verified_aid2_list | 91eb788a6b1f781e5796b03f56292a807aaee60d | 20,176 |
def audio_sort_key(ex):
"""Sort using duration time of the sound spectrogram."""
return ex.src.size(1) | ec940df6bf2b74962f221b84717f51beba5c4f5f | 20,177 |
from pathlib import Path
def _filename_to_title(filename, split_char="_"):
"""Convert a file path into a more readable title."""
filename = Path(filename).with_suffix("").name
filename_parts = filename.split(split_char)
try:
# If first part of the filename is a number for ordering, remove it
int(filename_parts[0])
if len(filename_parts) > 1:
filename_parts = filename_parts[1:]
except Exception:
pass
title = " ".join(ii.capitalize() for ii in filename_parts)
return title | f62ae56901f0a58e53e84e63423bcb9f2ccf4c5a | 20,178 |
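Two illustrative calls (the file names are made up):
_filename_to_title("01_getting_started.md")   # -> 'Getting Started' (leading order number is dropped)
_filename_to_title("api_reference")           # -> 'Api Reference'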
import pandas as pd
def basis_function_contributions(universe, mo, mocoefs='coef',
tol=0.01, ao=None, frame=0):
"""
Provided a universe with momatrix and basis_set_order attributes,
return the major basis function contributions of a particular
molecular orbital.
.. code-block:: python
# display the 16th orbital coefficients > abs(0.15)
basis_function_contributions(uni, 15, tol=0.15) # 0-based indexing!
Args:
universe (class:`exatomic.core.universe.Universe`): a universe
mo (int): molecular orbital index
mocoefs (str): column of interest in universe.momatrix
tol (float): minimum value of coefficient by which to filter
frame (int): frame of the universe (default is zero)
Returns:
joined (pd.DataFrame): a join of momatrix and basis_set_order
"""
small = universe.momatrix.contributions(mo, tol=tol, mocoefs=mocoefs, frame=frame)
chis = small['chi'].values
coefs = small[mocoefs]
coefs.index = chis
    # note: DataFrame.ix was removed in modern pandas; newer versions of this code would use .loc/.iloc
    joined = pd.concat([universe.basis_set_order.ix[chis], coefs], axis=1)
if ao is None:
return joined
else:
raise NotImplementedError("not clever enough for that.") | afe695d15d3aa43baae0ce7e0dcf2fb84f53c699 | 20,179 |
from sympy import S, sympify, Piecewise, Interval
# _add_splines (used below) is a private helper expected to be defined alongside this function in sympy.functions.special.bsplines
def bspline_basis(d, knots, n, x, close=True):
"""The `n`-th B-spline at `x` of degree `d` with knots.
B-Splines are piecewise polynomials of degree `d` [1]_. They are defined on
a set of knots, which is a sequence of integers or floats.
The 0th degree splines have a value of one on a single interval:
>>> from sympy import bspline_basis
>>> from sympy.abc import x
>>> d = 0
>>> knots = range(5)
>>> bspline_basis(d, knots, 0, x)
Piecewise((1, And(x <= 1, x >= 0)), (0, True))
For a given ``(d, knots)`` there are ``len(knots)-d-1`` B-splines defined, that
are indexed by ``n`` (starting at 0).
Here is an example of a cubic B-spline:
>>> bspline_basis(3, range(5), 0, x)
Piecewise((x**3/6, And(x < 1, x >= 0)),
(-x**3/2 + 2*x**2 - 2*x + 2/3, And(x < 2, x >= 1)),
(x**3/2 - 4*x**2 + 10*x - 22/3, And(x < 3, x >= 2)),
(-x**3/6 + 2*x**2 - 8*x + 32/3, And(x <= 4, x >= 3)),
(0, True))
By repeating knot points, you can introduce discontinuities in the
B-splines and their derivatives:
>>> d = 1
>>> knots = [0,0,2,3,4]
>>> bspline_basis(d, knots, 0, x)
Piecewise((-x/2 + 1, And(x <= 2, x >= 0)), (0, True))
It is quite time consuming to construct and evaluate B-splines. If you
need to evaluate a B-splines many times, it is best to lambdify them
first:
>>> from sympy import lambdify
>>> d = 3
>>> knots = range(10)
>>> b0 = bspline_basis(d, knots, 0, x)
>>> f = lambdify(x, b0)
>>> y = f(0.5)
See Also
========
bsplines_basis_set
References
==========
.. [1] http://en.wikipedia.org/wiki/B-spline
"""
knots = [sympify(k) for k in knots]
d = int(d)
n = int(n)
n_knots = len(knots)
n_intervals = n_knots - 1
if n + d + 1 > n_intervals:
raise ValueError('n + d + 1 must not exceed len(knots) - 1')
if d == 0:
result = Piecewise(
(S.One, Interval(knots[n], knots[n + 1], False,
not close).contains(x)),
(0, True)
)
elif d > 0:
denom = knots[n + d + 1] - knots[n + 1]
if denom != S.Zero:
B = (knots[n + d + 1] - x)/denom
b2 = bspline_basis(d - 1, knots, n + 1, x, close)
else:
b2 = B = S.Zero
denom = knots[n + d] - knots[n]
if denom != S.Zero:
A = (x - knots[n])/denom
b1 = bspline_basis(
d - 1, knots, n, x, close and (B == S.Zero or b2 == S.Zero))
else:
b1 = A = S.Zero
result = _add_splines(A, b1, B, b2)
else:
        raise ValueError('degree must be non-negative: %r' % d)
return result | 266a8ef3176e11cc598015ebb963c13ddcee9e31 | 20,180 |
def is_versioned(obj):
"""
Check if a given object is versioned by inspecting some of its attributes.
"""
# before any heuristic, newer versions of RGW will tell if an obj is
# versioned so try that first
if hasattr(obj, 'versioned'):
return obj.versioned
if not hasattr(obj, 'VersionedEpoch'):
# overly paranoid here, an object that is not versioned should *never*
# have a `VersionedEpoch` attribute
if getattr(obj, 'version_id', None):
if obj.version_id is None:
return False
return True # probably will never get here
return False
return True | 7f5ad90ffce6a8efde50dba47cdc63673ec79f60 | 20,181 |
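An illustrative check with stand-in objects; real callers pass RGW/boto bucket or key objects.
from types import SimpleNamespace
is_versioned(SimpleNamespace(versioned=True))        # True  - newer RGW reports it directly
is_versioned(SimpleNamespace(version_id="abc123"))   # True  - heuristic path
is_versioned(SimpleNamespace(other_attr=1))          # False - no versioning hints at all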
def preprocess_and_suggest_hyperparams(
task,
X,
y,
estimator_or_predictor,
location=None,
):
"""Preprocess the data and suggest hyperparameters.
Example:
```python
hyperparams, estimator_class, X, y, feature_transformer, label_transformer = \
preprocess_and_suggest_hyperparams("classification", X_train, y_train, "xgb_limitdepth")
model = estimator_class(**hyperparams) # estimator_class is XGBClassifier
model.fit(X, y)
X_test = feature_transformer.transform(X_test)
y_pred = label_transformer.inverse_transform(pd.Series(model.predict(X_test).astype(int)))
```
Args:
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression'.
X: A dataframe of training data in shape n*m.
For 'ts_forecast' task, the first column of X_train
must be the timestamp column (datetime type). Other
columns in the dataframe are assumed to be exogenous
variables (categorical or numeric).
y: A series of labels in shape n*1.
estimator_or_predictor: A str of the learner name or a dict of the learned config predictor.
"choose_xgb" means choosing between xgb_limitdepth and xgboost.
If a dict, it contains:
- "version": a str of the version number.
- "preprocessing": a dictionary containing:
* "center": a list of meta feature value offsets for normalization.
* "scale": a list of meta feature scales to normalize each dimension.
- "neighbors": a list of dictionaries. Each dictionary contains:
* "features": a list of the normalized meta features for a neighbor.
* "choice": a integer of the configuration id in the portfolio.
- "portfolio": a list of dictionaries, each corresponding to a configuration:
* "class": a str of the learner name.
* "hyperparameters": a dict of the config. They key "FLAML_sample_size" will be ignored.
location: (Optional) A str of the location containing mined portfolio file.
Only valid when the portfolio is a str, by default the location is flaml/default.
Returns:
hyperparams: A dict of the hyperparameter configurations.
        estimator_class: A class of the underlying estimator, e.g., lightgbm.LGBMClassifier.
X: the preprocessed X.
y: the preprocessed y.
feature_transformer: a data transformer that can be applied to X_test.
label_transformer: a label transformer that can be applied to y_test.
"""
dt = DataTransformer()
X, y = dt.fit_transform(X, y, task)
if "choose_xgb" == estimator_or_predictor:
# choose between xgb_limitdepth and xgboost
estimator_or_predictor = suggest_learner(
task,
X,
y,
estimator_list=["xgb_limitdepth", "xgboost"],
location=location,
)
config = suggest_config(task, X, y, estimator_or_predictor, location=location, k=1)[
0
]
estimator = config["class"]
model_class = get_estimator_class(task, estimator)
hyperparams = config["hyperparameters"]
model = model_class(task=task, **hyperparams)
if model.estimator_class is None:
return hyperparams, model_class, X, y, None, None
else:
estimator_class = model.estimator_class
X = model._preprocess(X)
hyperparams = hyperparams and model.params
class AutoMLTransformer:
def transform(self, X):
return model._preprocess(dt.transform(X))
transformer = AutoMLTransformer()
return hyperparams, estimator_class, X, y, transformer, dt.label_transformer | cd388bea6c9bfbb5d38001c549f2fe92d16aff41 | 20,182 |
# `mnemonic` is assumed to be the Algorand SDK helper module (e.g. `from algosdk import mnemonic`)
def passphrase_from_private_key(private_key):
"""Return passphrase from provided private key."""
return mnemonic.from_private_key(private_key) | aed1c465795d22fd80680c0484d377fa6cabf0c8 | 20,183 |
def merge_on_empty_fields(base, tomerge):
"""Utility to quickly fill empty or falsy field of $base with fields
of $tomerge
"""
has_merged_anything = False
for key in tomerge:
if not base.get(key):
base[key] = tomerge.get(key)
has_merged_anything = True
return has_merged_anything | f8cb14047d2e17e2155beb1ab86eab7cdf531af0 | 20,184 |
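A small worked example (the dictionaries are made up):
base = {"name": "", "city": "Paris"}
merge_on_empty_fields(base, {"name": "Alice", "city": "Lyon"})   # -> True
base                                                             # -> {'name': 'Alice', 'city': 'Paris'}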
def clear_rows(grid, locked):
"""Deletes the row, if that row is filled."""
increment = 0
for i in range(len(grid) - 1, -1, -1):
row = grid[i]
if (0, 0, 0) not in row:
increment += 1
index = i
for j in range(len(row)):
try:
del locked[(j, i)]
                except KeyError:
continue
if increment > 0:
for key in sorted(list(locked), key=lambda x: x[1])[::-1]:
x, y = key
if y < index:
newKey = (x, y + increment)
locked[newKey] = locked.pop(key)
return increment * 1.5 | 5974a129ac0bb756ee1038f61c9eeaf625ccbb72 | 20,185 |
import shlex
import funcy
# `logger` and `CallRequest` are assumed to be defined elsewhere in this module
def call(cmd_args, suppress_output=False):
""" Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1
"""
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
return call_result | 1556d52de9d620e74c8a4b946c3120cf3579dede | 20,186 |
def provides(interface):
"""
A validator that raises a :exc:`TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
performed using ``interface.providedBy(value)`` (see `zope.interface
<http://docs.zope.org/zope.interface/>`_).
:param interface: The interface to check for.
:type interface: zope.interface.Interface
The :exc:`TypeError` is raised with a human readable error message, the
attribute (of type :class:`attr.Attribute`), the expected interface, and
the value it got.
"""
return _ProvidesValidator(interface) | 9b6e29aa8c3e0a1757daa1c0f3eb455ec66fa594 | 20,187 |
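A hedged usage sketch: it assumes _ProvidesValidator exists in this module (as referenced above) and that attrs and zope.interface are installed; all class names below are illustrative.
import attr
import zope.interface

class IGreeter(zope.interface.Interface):
    def greet():
        """Return a greeting."""

@zope.interface.implementer(IGreeter)
class Greeter(object):
    def greet(self):
        return "hello"

@attr.s
class Holder(object):
    greeter = attr.ib(validator=provides(IGreeter))

Holder(Greeter())     # passes validation
# Holder(object())    # would raise TypeError, since object() does not provide IGreeter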
import numpy as np
def v_t(r):
"""
Mean thermal velocity
"""
return (8/np.pi)**0.5*c(r) | af475d1376a549abe501b7b47e5f9fa35d8258c1 | 20,188 |
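A brief numeric check, assuming c(r) returns the local isothermal sound speed in consistent units:
# sqrt(8/pi) ~ 1.596, so if c(r) == 340.0 m/s then v_t(r) ~ 542.6 m/s,
# which is the Maxwell-Boltzmann mean speed sqrt(8*k*T/(pi*m)) when c = sqrt(k*T/m).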
from typing import Callable
from typing import cast
def _state_stateful_alarm_controller(
select_state: Callable[[str], OverkizStateType]
) -> str:
"""Return the state of the device."""
if state := cast(str, select_state(OverkizState.CORE_ACTIVE_ZONES)):
# The Stateful Alarm Controller has 3 zones with the following options:
# (A, B, C, A,B, B,C, A,C, A,B,C). Since it is not possible to map this to AlarmControlPanel entity,
# only the most important zones are mapped, other zones can only be disarmed.
if state in MAP_CORE_ACTIVE_ZONES:
return MAP_CORE_ACTIVE_ZONES[state]
return STATE_ALARM_ARMED_CUSTOM_BYPASS
return STATE_ALARM_DISARMED | 3663d8dda26586dae416ce6d5dbe55fafdb821c8 | 20,189 |
def _connect_new_volume(module, array, answer=False):
"""Connect volume to host"""
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version and module.params['lun']:
try:
array.connect_host(module.params['host'],
module.params['volume'],
lun=module.params['lun'])
answer = True
except Exception:
module.fail_json(msg='LUN ID {0} invalid. Check for duplicate LUN IDs.'.format(module.params['lun']))
else:
array.connect_host(module.params['host'], module.params['volume'])
answer = True
return answer | f6b5dea4e78f832b536fdc269dfe1b9c040cb9b7 | 20,190 |
def is_mongo_configured(accessor):
"""
works out if mongodb is configured to run with trackerdash
i.e. first time running
"""
return accessor.verify_essential_collections_present() | c0487f6d899e6cee4f6bbb31bffbd17890812c30 | 20,191 |
from cms.api import add_plugin
def create_default_children_plugins(request, placeholder, lang, parent_plugin, children_conf):
"""
Create all default children plugins in the given ``placeholder``.
If a child have children, this function recurse.
Return all children and grandchildren (etc.) created
"""
children = list()
grandchildren = list()
for conf in children_conf:
if not permissions.has_plugin_permission(request.user, conf['plugin_type'], "add"):
continue
plugin = add_plugin(placeholder, conf['plugin_type'], lang, **conf['values'])
plugin.parent = parent_plugin
plugin.save()
if 'children' in conf:
            grandchildren += create_default_children_plugins(request, placeholder, lang, plugin, conf['children'])
plugin.notify_on_autoadd(request, conf)
children.append(plugin)
parent_plugin.notify_on_autoadd_children(request, conf, children)
return children + grandchildren | 121106100c50d7ebdace254b711e6d31611dbf3d | 20,192 |
import sympy
import math
def _split_value_equally(delta, count):
"""Splits an integer or rational into roughly equal parts."""
numer = sympy.numer(delta)
denom = sympy.denom(delta)
return [int(math.floor((numer + i) / count)) / denom for i in range(count)] | e241444100b2e0f3c1a589d87c41aa8710fe5b8e | 20,193 |
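A worked example (the numbers are arbitrary):
_split_value_equally(sympy.Integer(7), 3)          # -> [2, 2, 3], parts sum back to 7
_split_value_equally(sympy.Rational(1, 2), 2)      # -> [0, 1/2]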
import ast
def maybe_get_docstring(node: ast.AST):
"""Get docstring from a constant expression, or return None."""
if (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.Constant)
and isinstance(node.value.value, str)
):
return node.value.value
elif (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.Str)
):
return node.value.s | 23171c739f3c9ae6d62ecf3307ac7c3409852d6b | 20,194 |
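A minimal usage sketch:
module = ast.parse('"""A module docstring."""\nx = 1')
maybe_get_docstring(module.body[0])   # -> 'A module docstring.'
maybe_get_docstring(module.body[1])   # -> None (an assignment is not a docstring)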