content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def ESS(works_prev, works_incremental):
"""
compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.
Parameters
----------
works_prev: np.array
np.array of floats representing the accumulated works at t-1 (unnormalized)
works_incremental: np.array
np.array of floats representing the incremental works at t (unnormalized)
Returns
-------
normalized_ESS: float
effective sample size
"""
prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
incremental_weights_unnormalized = np.exp(-works_incremental)
ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
normalized_ESS = ESS / len(prev_weights_normalized)
    assert normalized_ESS >= 0.0 - DISTRIBUTED_ERROR_TOLERANCE and normalized_ESS <= 1.0 + DISTRIBUTED_ERROR_TOLERANCE, f"the normalized ESS ({normalized_ESS}) is not between 0 and 1"
return normalized_ESS | 514ca2462708a4c163f45e92854159d50eb5f3a8 | 16,162 |
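A minimal numeric sketch of the estimator above, assuming NumPy and SciPy are available; the work values are made up for illustration:

```python
import numpy as np
from scipy.special import logsumexp

# Hypothetical accumulated works at t-1 and incremental works at t for 4 particles.
works_prev = np.array([0.1, 0.5, 0.2, 0.3])
works_incremental = np.array([0.05, 0.40, 0.10, 0.25])

# Same estimator as ESS() above: normalized previous weights combined with
# unnormalized incremental weights, then divided by the particle count.
prev_w = np.exp(-works_prev - logsumexp(-works_prev))
inc_w = np.exp(-works_incremental)
ess = np.dot(prev_w, inc_w) ** 2 / np.dot(prev_w ** 2, inc_w ** 2)
normalized_ess = ess / len(prev_w)
print(normalized_ess)  # a value in (0, 1]
```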
def new_line_over():
"""Creates a new line over the cursor.
The cursor is also moved to the beginning of the new line. It is
not possible to create more than one new line over the cursor
at a time for now.
Usage:
`In a config file:`
.. code-block:: yaml
- new_line_over:
`Using the API:`
.. code-block:: python
ezvi.tools.new_line_over()
:rtype: str
:return: Characters that would be used in ``Vi`` to add a new line
over the cursor.
"""
to_write = "O" + ESCAPE
return to_write | 41da4d301240a8ea3d9108dd1d957a30cff1097b | 16,163 |
import json
def lambda_handler(event, context):
"""Calls custom job waiter developed by user
Arguments:
event {dict} -- Dictionary with details on previous processing step
context {dict} -- Dictionary with details on Lambda context
Returns:
{dict} -- Dictionary with Processed Bucket, Key(s) and Job Details
"""
try:
logger.info("Lambda event is [{}]".format(event))
logger.info(event["body"])
source_bucket = event["body"]["bucket"]
job_name = event["body"]["targetJob"]
ddb_table = event["body"]["targetDDBTable"]
token = event["body"]["token"]
s3_prefix_key_proc = event["body"]["keysRawProc"]
logger.info(
"[{}] [{}] [{}] [{}]".format(
source_bucket,
s3_prefix_key_proc,
job_name,
ddb_table,
)
)
# Submitting a new Glue Job
job_response = client.start_job_run(
JobName=job_name,
Arguments={
# Specify any arguments needed based on bucket and keys (e.g. input/output S3 locations)
"--job-bookmark-option": "job-bookmark-enable",
"--additional-python-modules": "pyarrow==2,awswrangler==2.9.0",
# Custom arguments below
"--TARGET_DDB_TABLE": ddb_table,
"--S3_BUCKET": source_bucket,
"--S3_PREFIX_PROCESSED": s3_prefix_key_proc[0]
#
},
MaxCapacity=2.0,
)
logger.info("Response is [{}]".format(job_response))
# Collecting details about Glue Job after submission (e.g. jobRunId for Glue)
json_data = json.loads(json.dumps(job_response, default=datetimeconverter))
job_details = {
"jobName": job_name,
"jobRunId": json_data.get("JobRunId"),
"jobStatus": "STARTED",
"token": token,
}
response = {"jobDetails": job_details}
except Exception as e:
logger.error("Fatal error", exc_info=True)
sagemaker.send_pipeline_execution_step_failure(
CallbackToken=token, FailureReason="error"
)
raise e
return response | e5a4055a39d0df1fabd3ad5f70a2859524378f44 | 16,164 |
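For reference, a hypothetical event payload matching the keys the handler reads above; every value here is illustrative only, and the real pipeline may pass additional fields:

```python
sample_event = {
    "body": {
        "bucket": "my-raw-data-bucket",         # S3 bucket holding the data (illustrative name)
        "targetJob": "raw-to-processed-etl",    # Glue job name (illustrative)
        "targetDDBTable": "job-audit-table",    # DynamoDB table passed through to the job
        "token": "<sagemaker-callback-token>",  # callback token for the pipeline step
        "keysRawProc": ["raw/2021/05/01/"],     # list of S3 prefixes; only the first is used
    }
}
```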
def put_path(components, value):
"""Recursive function to put value in component"""
if len(components) > 1:
new = components.pop(0)
value = put_path(components, value)
else:
new = components[0]
return {new: value} | 77db4064a77cf1cdcde1d74d901410525722b66e | 16,165 |
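A quick usage sketch, assuming put_path above is in scope; note that the components list is consumed in place via pop(0):

```python
nested = put_path(['a', 'b', 'c'], 1)
# nested == {'a': {'b': {'c': 1}}}
```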
def con_orthogonal_checkboard(X,c_v1,c_v2,c_v3,c_v4,num,N):
"""for principal / isothermic / developable mesh / aux_diamond / aux_cmc
(v1-v3)*(v2-v4)=0
"""
col = np.r_[c_v1,c_v2,c_v3,c_v4]
row = np.tile(np.arange(num),12)
d1 = X[c_v2]-X[c_v4]
d2 = X[c_v1]-X[c_v3]
d3 = X[c_v4]-X[c_v2]
d4 = X[c_v3]-X[c_v1]
data = np.r_[d1,d2,d3,d4]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.einsum('ij,ij->i',d1.reshape(-1,3, order='F'),d2.reshape(-1,3, order='F'))
return H,r | f05228d6caa49f60a2a9f515ce5590e6f13127e0 | 16,166 |
def _PropertyGridInterface_GetPropertyValues(self, dict_=None, as_strings=False, inc_attributes=False):
"""
Returns all property values in the grid.
    :param `dict_`: A dictionary to fill with the property values. If not given,
        then a new one is created. The dict_ can be an object as well,
        in which case its __dict__ is used.
:param `as_strings`: if True, then string representations of values
are fetched instead of native types. Useful for config and such.
:param `inc_attributes`: if True, then property attributes are added
in the form of "@<propname>@<attr>".
:returns: A dictionary with values. It is always a dictionary,
        so if dict_ was an object with a __dict__ attribute, then that
attribute is returned.
"""
if dict_ is None:
dict_ = {}
elif hasattr(dict_,'__dict__'):
dict_ = dict_.__dict__
getter = self.GetPropertyValue if not as_strings else self.GetPropertyValueAsString
it = self.GetVIterator(PG_ITERATE_PROPERTIES)
while not it.AtEnd():
p = it.GetProperty()
name = p.GetName()
dict_[name] = getter(p)
if inc_attributes:
attrs = p.GetAttributes()
if attrs and len(attrs):
dict_['@%s@attr'%name] = attrs
it.Next()
return dict_ | 06974bec88351d5e8743b43e7c0495bb40545ef0 | 16,167 |
def get_pipelines(exp_type, cal_ver=None, context=None):
"""Given `exp_type` and `cal_ver` and `context`, locate the appropriate SYSTEM CRDSCFG
reference file and determine the sequence of pipeline .cfgs required to process that
exp_type.
"""
context = _get_missing_context(context)
cal_ver = _get_missing_calver(cal_ver)
with log.augment_exception("Failed determining required pipeline .cfgs for",
"EXP_TYPE", srepr(exp_type), "CAL_VER", srepr(cal_ver)):
config_manager = _get_config_manager(context, cal_ver)
return config_manager.exptype_to_pipelines(exp_type) | 7fb4a02ffe7598df4621b2fd4a6863094616fd41 | 16,168 |
def distance_to_line(p,a,b):
"""
Computes the perpendicular distance from a point to an infinite line.
Parameters
----------
p : (x,y)
Coordinates of a point.
a : (x,y)
Coordinates of a point on a line.
b : (x,y)
Coordinates of another point on a line.
Returns
----------
float
The Euclidean distance from p to the infinite line through a & b.
"""
# code by BJK
# area of triangle formed between point and line segment
trianglearea=abs(area([a,b,p]))
# length of line segment
line_length=distance(a,b)
# make sure line segment has a length
if line_length==0:
# a & b are the same, so just calculate distance between points
return distance(p,a)
else:
# the distance we want is the height of the triangle
# area is 1/2 base x height so height is 2*area/base
return 2*trianglearea/line_length | 1b1d0ef37587cd8cb0f5730ac78c39ec8b42faec | 16,169 |
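The area and distance helpers used above are assumed to be defined elsewhere; the self-contained sketch below reproduces the same 2*area/base identity with simple stand-ins (shoelace triangle area and Euclidean distance):

```python
import math

def _area(pts):
    # Signed area of a triangle [(x1, y1), (x2, y2), (x3, y3)] via the shoelace formula.
    (x1, y1), (x2, y2), (x3, y3) = pts
    return 0.5 * ((x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1))

def _distance(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

p, a, b = (3, 4), (0, 0), (10, 0)
d = 2 * abs(_area([a, b, p])) / _distance(a, b)
print(d)  # 4.0 -- the perpendicular distance from (3, 4) to the x-axis
```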
def pearsonr(A, B):
"""
A broadcasting method to compute pearson r and p
-----------------------------------------------
Parameters:
A: matrix A, (i*k)
B: matrix B, (j*k)
Return:
rcorr: matrix correlation, (i*j)
pcorr: matrix correlation p, (i*j)
Example:
>>> rcorr, pcorr = pearsonr(A, B)
"""
if isinstance(A,list):
A = np.array(A)
if isinstance(B,list):
B = np.array(B)
if np.ndim(A) == 1:
A = A[None,:]
if np.ndim(B) == 1:
B = B[None,:]
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
rcorr = np.dot(A_mA, B_mB.T)/np.sqrt(np.dot(ssA[:,None], ssB[None]))
    df = A.shape[1] - 2  # degrees of freedom: number of samples along the shared axis, minus 2
r_forp = rcorr*1.0
r_forp[r_forp==1.0] = 0.0
t_squared = rcorr.T**2*(df/((1.0-rcorr.T)*(1.0+rcorr.T)))
pcorr = special.betainc(0.5*df, 0.5, df/(df+t_squared))
return rcorr, pcorr | f66ca9eb6c6367580043ab9d512400c826d30d39 | 16,170 |
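A small usage sketch, assuming the pearsonr defined above (not scipy.stats.pearsonr) is in scope together with numpy as np and scipy's special module:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((2, 50))   # 2 variables, 50 shared samples
B = rng.standard_normal((3, 50))   # 3 variables, same 50 samples
rcorr, pcorr = pearsonr(A, B)
print(rcorr.shape)  # (2, 3): correlation of every row of A with every row of B
```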
def inst_bench(dt, gt, bOpts, tp=None, fp=None, score=None, numInst=None):
"""
ap, rec, prec, npos, details = inst_bench(dt, gt, bOpts, tp = None, fp = None, sc = None, numInst = None)
dt - a list with a dict for each image and with following fields
.boxInfo - info that will be used to cpmpute the overlap with ground truths, a list
.sc - score
gt
.boxInfo - info used to compute the overlap, a list
.diff - a logical array of size nGtx1, saying if the instance is hard or not
bOpt
.minoverlap - the minimum overlap to call it a true positive
[tp], [fp], [sc], [numInst]
Optional arguments, in case the inst_bench_image is being called outside of this function
"""
details = None
if tp is None:
# We do not have the tp, fp, sc, and numInst, so compute them from the structures gt, and out
tp = []
fp = []
numInst = []
score = []
dupDet = []
instId = []
ov = []
for i in range(len(gt)):
# Sort dt by the score
sc = dt[i]["sc"]
bb = dt[i]["boxInfo"]
ind = np.argsort(sc, axis=0)
ind = ind[::-1]
if len(ind) > 0:
                sc = np.vstack([sc[i, :] for i in ind])
                bb = np.vstack([bb[i, :] for i in ind])
            else:
                sc = np.zeros((0, 1)).astype(float)
                bb = np.zeros((0, 4)).astype(float)
dtI = dict({"boxInfo": bb, "sc": sc})
tp_i, fp_i, sc_i, numInst_i, dupDet_i, instId_i, ov_i = inst_bench_image(
dtI, gt[i], bOpts
)
tp.append(tp_i)
fp.append(fp_i)
score.append(sc_i)
numInst.append(numInst_i)
dupDet.append(dupDet_i)
instId.append(instId_i)
ov.append(ov_i)
details = {
"tp": list(tp),
"fp": list(fp),
"score": list(score),
"dupDet": list(dupDet),
"numInst": list(numInst),
"instId": list(instId),
"ov": list(ov),
}
tp = np.vstack(tp[:])
fp = np.vstack(fp[:])
sc = np.vstack(score[:])
cat_all = np.hstack((tp, fp, sc))
ind = np.argsort(cat_all[:, 2])
cat_all = cat_all[ind[::-1], :]
tp = np.cumsum(cat_all[:, 0], axis=0)
fp = np.cumsum(cat_all[:, 1], axis=0)
thresh = cat_all[:, 2]
npos = np.sum(numInst, axis=0)
# Compute precision/recall
rec = tp / npos
prec = np.divide(tp, (fp + tp))
ap = VOCap(rec, prec)
return ap, rec, prec, npos, details | 9f8e12863205c24247003a4c95cf52f99086a6a6 | 16,171 |
def normalized_str(token):
"""
Return as-is text for tokens that are proper nouns or acronyms, lemmatized
text for everything else.
Args:
token (``spacy.Token`` or ``spacy.Span``)
Returns:
str
"""
if isinstance(token, SpacyToken):
return token.text if preserve_case(token) else token.lemma_
elif isinstance(token, SpacySpan):
return ' '.join(subtok.text if preserve_case(subtok) else subtok.lemma_
for subtok in token)
else:
msg = 'Input must be a spacy Token or Span, not {}.'.format(type(token))
raise TypeError(msg) | c5e30b48716fa99bfbcf8252b3ecd018cc921cbe | 16,173 |
def scatter_nd(*args, **kwargs):
""" See https://www.tensorflow.org/api_docs/python/tf/scatter_nd .
"""
return tensorflow.scatter_nd(*args, **kwargs) | 5b5d457c91df73314de6d81c105132d6b69eb1aa | 16,174 |
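The wrapper above simply forwards its arguments to tf.scatter_nd; a standalone illustration of that op, assuming TensorFlow is installed:

```python
import tensorflow as tf

indices = tf.constant([[1], [3]])   # positions to write into
updates = tf.constant([9, 10])      # values to scatter
result = tf.scatter_nd(indices, updates, shape=[5])
# result == [0, 9, 0, 10, 0]
```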
from typing import Union
from typing import Tuple
def concatenate_sequences(X: Union[list, np.ndarray], y: Union[list, np.ndarray],
sequence_to_value: bool = False) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Concatenate multiple sequences to scikit-learn compatible numpy arrays.
    Parameters
    ----------
    X : Union[list, np.ndarray] of shape=(n_sequences, )
        All input sequences. Note that all elements in ```X```
        must have at least one equal dimension.
    y : Union[list, np.ndarray] of shape=(n_sequences, )
        All target sequences, one per element of ```X```.
sequence_to_value : bool, default=False
If true, expand each element of y to the sequence length
Returns
-------
X : np.ndarray of shape=(n_samples, n_features)
Input data where n_samples is the accumulated length of all sequences
y : np.ndarray of shape=(n_samples, n_features) or shape=(n_samples, )
Target data where n_samples is the accumulated length of all sequences
sequence_ranges : Union[None, np.ndarray] of shape=(n_sequences, 2)
Sequence border indicator matrix
"""
if isinstance(X, list):
X = np.asarray(X)
if isinstance(y, list):
y = np.asarray(y)
X = np.array(X)
y = np.array(y)
if sequence_to_value:
for k, _ in enumerate(y):
y[k] = np.repeat(y[k], X[k].shape[0])
check_consistent_length(X, y)
sequence_ranges: np.ndarray = np.ndarray([])
if X.ndim == 1:
sequence_ranges = np.zeros((X.shape[0], 2), dtype=int)
sequence_ranges[:, 1] = np.cumsum([X[k].shape[0] for k, _ in enumerate(X)])
sequence_ranges[1:, 0] = sequence_ranges[:-1, 1]
for k, _ in enumerate(X):
X[k], y[k] = check_X_y(X[k], y[k], multi_output=True)
return np.concatenate(X), np.concatenate(y), sequence_ranges | b4b2489eeb601ce5378f6cf7b2cce7daf68bdf1d | 16,175 |
def run_command_with_code(cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = sp.PIPE
else:
stdout = None
proc = sp.Popen(cmd, stdout=stdout, stderr=sp.PIPE)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
log.error('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output, proc.returncode | 45e8592def8290f45458a183bed410072cc15000 | 16,177 |
from typing import Union
import ast
def _create_element_invocation(span_: span.Span, callee: Union[ast.NameRef,
ast.ModRef],
arg_array: ast.Expr) -> ast.Invocation:
"""Creates a function invocation on the first element of the given array.
We need to create a fake invocation to deduce the type of a function
in the case where map is called with a builtin as the map function. Normally,
map functions (including parametric ones) have their types deduced when their
ast.Function nodes are encountered (where a similar fake ast.Invocation node
is created).
Builtins don't have ast.Function nodes, so that inference can't occur, so we
  essentially perform that synthesis and deduction here.
Args:
span_: The location in the code where analysis is occurring.
callee: The function to be invoked.
arg_array: The array of arguments (at least one) to the function.
Returns:
An invocation node for the given function when called with an element in the
argument array.
"""
annotation = ast.TypeAnnotation(
span_, scanner.Token(scanner.TokenKind.KEYWORD, span_,
scanner.Keyword.U32), ())
index_number = ast.Number(
scanner.Token(scanner.TokenKind.KEYWORD, span_, '32'), annotation)
index = ast.Index(span_, arg_array, index_number)
return ast.Invocation(span_, callee, (index,)) | 0449c27fc6e7f16054bddfd99bd9e64109b9ee0e | 16,179 |
import time
def train_deeper_better(train_data, train_labels, test_data, test_labels, params):
"""Same as 'train_deeper', but now with tf.contrib.data.Dataset input pipeline."""
default_params = {
'regularization_coeff': 0.00001,
'keep_prob': 0.5,
'batch_size': 128,
'fc1_size': 2048,
'fc2_size': 1024,
'fc3_size': 1024,
'fc4_size': 1024,
'fc5_size': 512,
'activation': 'relu',
}
activation_funcs = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
}
def get_param(name):
if name in params:
return params[name]
logger.warning('%s not found in param, use default value %r', name, default_params[name])
return default_params[name]
regularization_coeff = get_param('regularization_coeff')
keep_prob_param = get_param('keep_prob')
batch_size = int(get_param('batch_size'))
fc1_size = int(get_param('fc1_size'))
fc2_size = int(get_param('fc2_size'))
fc3_size = int(get_param('fc3_size'))
fc4_size = int(get_param('fc4_size'))
fc5_size = int(get_param('fc5_size'))
activation_func = activation_funcs[get_param('activation')]
save_restore = False
time_limit_seconds = 3600
saver_path = join(SAVER_FOLDER, train_deeper_better.__name__)
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(52)
global_step_tensor = tf.contrib.framework.get_or_create_global_step()
epoch_tensor = tf.Variable(0, trainable=False, name='epoch')
next_epoch = tf.assign_add(epoch_tensor, 1)
# dataset definition
dataset = Dataset.from_tensor_slices({'x': train_data, 'y': train_labels})
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
sample = iterator.get_next()
x = sample['x']
y = sample['y']
# actual computation graph
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool, name='is_training')
regularizer = tf.contrib.layers.l2_regularizer(scale=regularization_coeff)
def fully_connected(x, size, name):
return dense_regularized(
x, size, is_training, keep_prob, regularizer, name, activation_func,
)
fc1 = fully_connected(x, fc1_size, 'fc1')
fc2 = fully_connected(fc1, fc2_size, 'fc2')
fc3 = fully_connected(fc2, fc3_size, 'fc3')
fc4 = fully_connected(fc3, fc4_size, 'fc4')
fc5 = fully_connected(fc4, fc5_size, 'fc5')
logits = dense(fc5, NUM_CLASSES, regularizer, 'logits')
layer_summaries(logits, 'logits_summaries')
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1)), tf.float32),
)
accuracy_percent = 100 * accuracy
tf.summary.scalar('accuracy_percent', accuracy_percent)
with tf.name_scope('loss'):
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
regularization_loss = tf.reduce_sum(regularization_losses)
cross_entropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y),
)
loss = cross_entropy_loss + regularization_loss
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)
tf.summary.scalar('loss', loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# ensures that we execute the update_ops before performing the train_op
# needed for batch normalization (apparently)
optimizer = tf.train.AdamOptimizer(learning_rate=(1e-4), epsilon=1e-3)
train_op = optimizer.minimize(loss, global_step=global_step_tensor)
all_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'train'))
batch_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'batch'))
test_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'test'))
saver = tf.train.Saver(max_to_keep=3)
test_accuracy = 0
best_accuracy = 0
with tf.Session(graph=graph) as sess:
restored = False
if save_restore:
try:
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=SAVER_FOLDER))
restored = True
except ValueError as exc:
logger.info('Could not restore previous session! %r', exc)
logger.info('Starting from scratch!')
if not restored:
tf.global_variables_initializer().run()
logger.info('Starting training...')
start_time = time.time()
def enough():
if time_limit_seconds is None:
return False
elapsed = time.time() - start_time
return elapsed > time_limit_seconds
epoch = epoch_tensor.eval()
new_epoch = True
while not enough():
logger.info('Starting new epoch #%d!', epoch)
sess.run(iterator.initializer, feed_dict={})
while not enough():
step = tf.train.global_step(sess, tf.train.get_global_step())
try:
sess.run(train_op, feed_dict={keep_prob: keep_prob_param, is_training: True})
if new_epoch:
new_epoch = False
l, reg_l, ac, summaries = sess.run(
[loss, regularization_loss, accuracy_percent, all_summaries],
feed_dict={keep_prob: keep_prob_param, is_training: False},
)
batch_writer.add_summary(summaries, global_step=step)
logger.info(
'Minibatch loss: %f, reg loss: %f, accuracy: %.2f%%',
l, reg_l, ac,
)
except tf.errors.OutOfRangeError:
logger.info('End of epoch #%d', epoch)
break
# end of epoch
previous_epoch = epoch
epoch = next_epoch.eval()
new_epoch = True
if previous_epoch % 5 == 0 and save_restore:
saver.save(sess, saver_path, global_step=previous_epoch)
def get_eval_dict(data, labels):
"""Data for evaluation."""
return {x: data, y: labels, keep_prob: 1, is_training: False}
train_l, train_ac, summaries = sess.run(
[loss, accuracy_percent, all_summaries],
feed_dict=get_eval_dict(train_data[:10000], train_labels[:10000]),
)
train_writer.add_summary(summaries, global_step=step)
test_l, test_accuracy, summaries = sess.run(
[loss, accuracy_percent, all_summaries],
feed_dict=get_eval_dict(test_data, test_labels),
)
test_writer.add_summary(summaries, global_step=step)
best_accuracy = max(best_accuracy, test_accuracy)
logger.info('Train loss: %f, train accuracy: %.2f%%', train_l, train_ac)
logger.info(
'Test loss: %f, TEST ACCURACY: %.2f%% BEST ACCURACY %.2f%% <<<<<<<',
test_l, test_accuracy, best_accuracy,
)
return best_accuracy | c2d2c56ac7dbb52d072f2397540d4d793ac0d0c4 | 16,181 |
def redirect_return():
"""Redirects back from page with url generated by url_return."""
return redirect(str(Url.get_return())) | f1ce09afef02651e0331a930e53211f9eb4f2a54 | 16,182 |
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateOperatingSystem(coresys) | daf3bd3ddca0085d6305535b27c28d70ac240dac | 16,183 |
def _weight_initializers(seed=42):
"""Function returns initilializers to be used in the model."""
kernel_initializer = tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.02, seed=seed
)
bias_initializer = tf.keras.initializers.Zeros()
return kernel_initializer, bias_initializer | 1c7652b787d4a69d3a43983c2c291c09337d06d0 | 16,184 |
def get_non_ready_rs_pod_names(namespace):
"""
get names of rs pods that are not ready
"""
pod_names = []
rs_pods = get_pods(namespace, selector='redis.io/role=node')
if not rs_pods:
logger.info("Namespace '%s': cannot find redis enterprise pods", namespace)
return []
for rs_pod in rs_pods:
pod_name = rs_pod['metadata']['name']
if "status" in rs_pod and "containerStatuses" in rs_pod["status"]:
for container_status_entry in rs_pod["status"]["containerStatuses"]:
container_name = container_status_entry['name']
is_ready = container_status_entry["ready"]
if container_name == RLEC_CONTAINER_NAME and not is_ready:
pod_names.append(pod_name)
return pod_names | 167922c4fa03127a3371f2c5b7516bb6462c6253 | 16,186 |
def lookup_material_probase(information_extractor, query, num):
"""Lookup material in Probase"""
material_params = {
'instance': query,
'topK': num
}
result = information_extractor.lookup_probase(material_params)
rank = information_extractor.rank_probase_result_material(result)
return rank | 9cecf99e3a9689f85788df21ef01d4e86c9a392d | 16,187 |
def get_unexpected_exit_events(op):
"""Return all unexpected exit status events."""
events = get_events(op)
if not events:
return None
return [e for e in events if is_unexpected_exit_status_event(e)] | 171158d16c34e2764bc8c91f4888863c162043c4 | 16,188 |
async def delete_user(username: str) -> GenericResponse:
"""Delete concrete user by username"""
try:
await MongoDbWrapper().remove_user(username)
except Exception as exception_message:
raise DatabaseException(error=exception_message)
return GenericResponse(detail="Deleted user") | 8b2756922ab79d058097105fa8cd000396350a3b | 16,189 |
def get_changelog():
"""download ChangeLog.txt from github, extract latest version number, return a tuple of (latest_version, contents)
"""
# url will be chosen depend on frozen state of the application
source_code_url = 'https://github.com/pyIDM/pyIDM/raw/master/ChangeLog.txt'
new_release_url = 'https://github.com/pyIDM/pyIDM/releases/download/extra/ChangeLog.txt'
url = new_release_url if config.FROZEN else source_code_url
# url = new_release_url
# get BytesIO object
log('check for PyIDM latest version ...')
buffer = download(url, verbose=False)
if buffer:
# convert to string
contents = buffer.getvalue().decode()
# extract version number from contents
latest_version = contents.splitlines()[0].replace(':', '').strip()
return latest_version, contents
else:
log("check_for_update() --> couldn't check for update, url is unreachable")
return None | 7c8df0cbc5fa85642e4e23106006445f59539a1f | 16,190 |
def get_train_tags(force=False):
""" Download (if needed) and read the training tags.
Keyword Arguments
-----------------
force : bool
If true, overwrite existing data if it already exists.
"""
download_train_tags(force=force)
return read_tags(train_tags_file_path) | 5d67422a275011a719c0121206397fb99e6e4f70 | 16,191 |
def select_own(ligands, decoys, scores):
"""Select ligand ids and decoy ids from full ranked ids."""
#scores format is full OUTDOCK line
selected = set(ligands)
selected.update(decoys)
results = []
for scoreline in scores:
#id = scoreline[extract_all.zincCol] #refer to correct column always
id = scoreline[extract_all.zincCol].split('.')[0] #refer to correct column always
# maybe in this form: zinccode.prot
#print id
if id in selected:
results.append(scoreline)
#print scoreline
return results | 444555a30571e61fad7eac36389e2dd638313744 | 16,192 |
def cmp_text_file(text, file):
    """returns True when text and file content are identical
    """
    with open(file) as fh:
        ftext = fh.read()
    return ftext == text | ecf10004cd3fa230d0e794c4c89e45ca91e7e40e | 16,194 |
def get_alignment_summary(seq_info):
"""
Determine the consensus sequence of an alignment, and create position matrix
Definition of consensus: most common base represented at that position.
"""
consensus_sequence = []
position_matrix = []
for position in seq_info:
#Ignore any ambiguous basecalls - accept A, T, C, G, and 'gap'
base_counts = {
'a':position['bases'].count('a')+position['bases'].count('A'),
't':position['bases'].count('t')+position['bases'].count('T'),
'c':position['bases'].count('c')+position['bases'].count('C'),
'g':position['bases'].count('g')+position['bases'].count('G'),
'-':position['bases'].count('-'),
}
#print(base_counts)
max_basecalls = [key for key, count in base_counts.items() if count == max(base_counts.values())]
if len(max_basecalls) == 1:
consensus_sequence.append(max_basecalls[0])
else:
consensus_sequence.append('n')
#Assembling position_matrix
position_matrix.append(base_counts)
return (''.join(consensus_sequence), position_matrix) | f91e4dcea2f4570a194524970fdbc95eacc455b2 | 16,195 |
def _get_variables(exp:Experiment, config: dict) -> dict:
"""Process the configuration's variables before rendering it"""
return {key: value.format(exp=exp) for key, value in config.get("variables", {}).items()} | 1b819c93ef079557908c216dc5c9fa75d55fe0f3 | 16,197 |
def func_calc_M(S):
"""
    Use the molecule structure/symbol to calculate its molecular weight
    Parameter:
        S : structure in a format: (atomType number) separated by '-' or blank space
            number of '-' and spaces does not matter
            precedence: '-' > blank space
Example 1:
C2H3O4N5
Example 2:
C2 - H3 - O4 - N5
Example 3:
C2 H3 O4 N5
Example 4:
C2 H3 - O4 - N5
Return:
M : molecular weight (g/mol)
"""
##Test list
##Slist = [ 123, ' ', '- - ', '---', '1,2,','1 +','4 $', #bad
# 'C3H4O5Br1Cl2', 'CHOBrCl','Br Br BrBr', #good
# 'C3 - H -2 - 2 - O', 'C3 - H2 2 - O' #bad]
log = {'nice':True, }
# define Periodic Table
PT = { 'H':1.008, 'B':10.81, 'C':12.01, 'N':14.01, 'O':16.00, 'F':19.00,
           'P':30.97, 'S':32.06, 'Cl':35.45, 'Br':79.90, 'I':126.90 }
if not isinstance(S,str):
log['nice'] = False
log['info'] = 'Error: Molecule structure has to be a string'
return log, 0.0
S = S.lower()
proS = []
# format: split by '-' then split by blank space
for t in S.split('-'): proS += t.split()
if len(proS) == 0:
log['nice'] = False
log['info'] = 'Error: empty inputs'
return log, 0.0
proSS = []
# 1D: split to [ character number character number ]
for t in proS:
if t.isdigit():
proSS.append(int(t))
elif t.isalpha():
proSS.append(t)
elif t.isalnum():
stmp = ''
for c in t:
if c.isdigit():
if stmp.isalpha():
proSS.append(stmp)
stmp = ''
else:
if stmp.isdigit():
proSS.append(int(stmp))
stmp = ''
stmp += c
if stmp.isdigit():
proSS.append(int(stmp))
else:
proSS.append(stmp)
else:
log['nice'] = False
log['info'] = 'Error: input < {:} > is not correctly defined'.format(t)
return log, 0.0
proSSS = []
# 1D: split to [ atomtype number atomtype number ]
for t in proSS:
if isinstance(t,int):
proSSS.append(t)
else:
# for character, it may have special cases like Br, Cl
while True:
if 'br' in t or 'cl' in t:
ndx = t.find('br') if 'br' in t else t.find('cl')
if ndx > 0: proSSS += [ c for c in t[:ndx] ]
proSSS.append(t[ndx:ndx+2])
if len(t) >= ndx + 2:
t = t[ndx+2:]
else:
proSSS += [ c for c in t ]
break
else:
proSSS += [ c for c in t ]
break
    # No adjacent numbers are allowed
    # However, a character without a following number defaults to a count of 1
# Consider cases like:
# C 1 2 H <bad>
# C C C 3 <good>
# C 1 H 3 <good>
if not isinstance(proSSS[0],str):
log['nice'] = False
log['info'] = 'Error: the atomtype has to be in the first input along with its numbers\n' + \
' : < {:} > is not correctly defined'.format(proSSS[0])
return log, 0.0
bo = False
for t in proSSS:
if isinstance(t,int):
if bo:
log['nice'] = False
stmp = t
break
bo = True
else:
bo = False
if not log['nice']:
        log['info'] = 'Error: no adjacent number inputs are allowed\n' + \
' : < {:} > is not correctly defined'.format(stmp)
return log, 0.0
i = 0
proSSSS = []
# 2D: [ [atomtype, number], [atomtype, number], ... ]
while i < len(proSSS):
j = i + 1
if j < len(proSSS) and isinstance(proSSS[j],int):
proSSSS.append([proSSS[i],proSSS[j]])
i = j
else:
proSSSS.append([proSSS[i],1])
i += 1
# time to check for Periodic Table
M = 0.0
for t in proSSSS:
tmp = t[0].capitalize()
if tmp in PT:
M += PT[tmp] * t[1]
else:
log['nice'] = False
log['info'] = 'Error: atomtype < {:} > is not defined in Periodic Table'.format(tmp)
break
return log, M | ed8e3d5ccd5305caccfac64cb0ecb200fde650eb | 16,198 |
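A usage sketch for the parser above, with the arithmetic spelled out in a comment:

```python
log, M = func_calc_M('C2 H3 O4 N5')
# log['nice'] is True and
# M = 2*12.01 + 3*1.008 + 4*16.00 + 5*14.01, approx. 161.09 g/mol
```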
def find_NN(ngbrof, ngbrin, distance_ULIM=NP.inf, flatten=False, parallel=False,
nproc=None):
"""
-----------------------------------------------------------------------------
Find all nearest neighbours of one set of locations in another set of
locations within a specified distance.
Inputs:
ngbrof [numpy array] Locations for nearest neighbours are to be
determined. Has dimensions MxK where M is the number of locations.
ngbrin [numpy array] Locations from which nearest neighbours are to be
chosen for the locations in ngbrof. Has dimensions NxK.
distance_ULIM
[scalar] Maximum search radius to look for neighbours.
Default=NP.inf
flatten [boolean] If set to True, flattens the output of the nearest
neighbour search algorithm to yield two separate sets of matching
indices - one for ngbrof and the other for ngbrin. Default=False
parallel [boolean] specifies if parallelization is to be invoked. False
(default) means only serial processing. Parallelization is done
over ngbrof
nproc [scalar] specifies number of independent processes to spawn.
Default=None, means automatically determines the number of
process cores in the system and use one less than that to
avoid locking the system for other processes. Applies only
if input parameter 'parallel' (see above) is set to True.
If nproc is set to a value more than the number of process
cores in the system, it will be reset to number of process
cores in the system minus one to avoid locking the system out
for other processes
Outputs:
List containing three items. The first item is a list of M lists where each
of the M inner lists corresponds to one entry in ngbrof and the elements in
the inner list contains indices to ngbrin that are the nearest neighbours of
that specific ngbrof (same as output of cKDTree.query_ball_tree()). The
second item in the output list is a numpy array of indices to ngbrof
(obtained from the first item if input keyword flatten is set to True) or
None (if input keyword flatten is set to False). The third item in the output
list is a numpy array of indices to ngbrin that is a valid neighbour of
ngbrof (obtained from the first item if input keyword flatten is set to
True) or None (if input keyword flatten is set to False).
-----------------------------------------------------------------------------
"""
try:
ngbrof, ngbrin
except NameError:
raise NameError('ngbrof and ngbrin must be specified for finding nearest neighbours.')
if (ngbrof.shape[1] != ngbrin.shape[1]):
raise ValueError('ngbrof and ngbrin must contain same number of columns')
if parallel or (nproc is not None):
if nproc is None:
nproc = max(MP.cpu_count()-1, 1)
else:
nproc = min(nproc, max(MP.cpu_count()-1, 1))
split_ind = NP.arange(ngbrof.shape[0]/nproc, ngbrof.shape[0], ngbrof.shape[0]/nproc)
split_ngbrof_list = NP.split(ngbrof, split_ind, axis=0)
ngbrin_list = [ngbrin] * len(split_ngbrof_list)
distance_ULIM_list = [distance_ULIM] * len(split_ngbrof_list)
pool = MP.Pool(processes=nproc)
lolol = pool.map(find_NN_arg_splitter, IT.izip(split_ngbrof_list, ngbrin_list, distance_ULIM_list))
pool.close()
pool.join()
indNN_list = [subitem for item in lolol for subitem in item]
else:
kdtself = KDT(ngbrof)
kdtother = KDT(ngbrin)
indNN_list = kdtself.query_ball_tree(kdtother, distance_ULIM, p=2.0)
ind_ngbrof = None
ind_ngbrin = None
if flatten:
list_of_ind_tuples = [(i,ind) for i,item in enumerate(indNN_list) for ind in item]
ind_ngbrof, ind_ngbrin = zip(*list_of_ind_tuples)
return [indNN_list, NP.asarray(ind_ngbrof), NP.asarray(ind_ngbrin)] | 131d136ad92900f3ee624982f70234070d0d76a6 | 16,199 |
import datetime
def index(request):
"""Magicaltastic front page.
Plugins can register a hook called 'frontpage_updates_<type>' to add
updates to the front page. `<type>` is an arbitrary string indicating
the sort of update the plugin knows how to handle; for example,
spline-forum has a `frontpage_updates_forum` hook for posting news from
a specific forum.
Hook handlers should return a list of FrontPageUpdate objects.
Standard hook parameters are:
`limit`, the maximum number of items that should ever be returned.
`max_age`, the number of seconds after which items expire.
`title`, a name for the source.
`icon`, an icon to show next to its name.
`limit` and `max_age` are also global options.
Updates are configured in the .ini like so:
spline-frontpage.sources.foo = updatetype
spline-frontpage.sources.foo.opt1 = val1
spline-frontpage.sources.foo.opt2 = val2
Note that the 'foo' name is completely arbitrary and is only used for
grouping options together. This will result in a call to:
run_hooks('frontpage_updates_updatetype', opt1=val1, opt2=val2)
Plugins may also respond to the `frontpage_extras` hook with other
interesting things to put on the front page. There's no way to
customize the order of these extras or which appear and which don't, at
the moment. Such hooks should return an object with at least a
`template` attribute; the template will be called with the object
passed in as its `obj` argument.
Local plugins can override the fairly simple index.mako template to
customize the front page layout.
"""
response = request.response
config = request.registry.settings
cache = request.environ.get('beaker.cache', None)
c = request.tmpl_context
updates = []
global_limit = config['spline-frontpage.limit']
global_max_age = max_age_to_datetime(
config['spline-frontpage.max_age'])
c.sources = config['spline-frontpage.sources']
for source in c.sources:
new_updates = source.poll(global_limit, global_max_age, cache)
updates.extend(new_updates)
# Little optimization: once there are global_limit items, anything
# older than the oldest cannot possibly make it onto the list. So,
# bump global_max_age to that oldest time if this is ever the case.
updates.sort(key=lambda obj: obj.time, reverse=True)
del updates[global_limit:]
if updates and len(updates) == global_limit:
global_max_age = updates[-1].time
# Find the oldest unseen item, to draw a divider after it.
# If this stays as None, the divider goes at the top
c.last_seen_item = None
# Could have a timestamp in a cookie
last_seen_time = None
try:
last_seen_time = datetime.datetime.fromtimestamp(
int(request.cookies['frontpage-last-seen-time']))
except (KeyError, ValueError):
pass
if last_seen_time:
for update in updates:
if update.time > last_seen_time:
c.last_seen_item = update
else:
break
# Save ~now~ as the last-seen time
now = datetime.datetime.now().strftime('%s')
response.set_cookie('frontpage-last-seen-time', now)
# Done! Feed to template
c.updates = updates
# Hook for non-update interesting things to put on the front page.
# This hook should return objects with a 'template' attribute, and
# whatever else they need
c.extras = []
return {} | 14e4200c2277e48792fd4d02f0126293a82a9ba8 | 16,200 |
def fd_d1_o4_smoothend(var,grid,mat=False):
"""Centered finite difference, first derivative, 4th order using extrapolation to get boundary points
var: quantity to be differentiated.
grid: grid for var
mat: matrix for the finite-differencing operator. if mat=False then it is created"""
dx = grid[1]-grid[0]
grid0 = np.linspace(grid[0]-2*dx,grid[-1]+2*dx,len(grid)+4)
var0 = interp(grid,var,grid0)
if not mat:
mat=get_mat_fd_d1_o4(len(var0),grid0[1]-grid0[0])
dvar0=-np.dot(mat,var0)
dvar_out=dvar0[2:-2]
return -dvar_out | e1b57204e6fd9fe2839e4fb2e7230dd0f8854841 | 16,201 |
def find_node_pair_solutions(node_pairs, graph):
""" Return path and cost for all node pairs in the path sets. """
node_pair_solutions = {}
counter = 0
for node_pair in node_pairs:
if node_pair not in node_pair_solutions:
cost, path = dijkstra.find_cost(node_pair, graph)
node_pair_solutions[node_pair] = (cost, path)
# Also store the reverse pair
node_pair_solutions[node_pair[::-1]] = (cost, path[::-1])
return node_pair_solutions | f2f742cc1e969b4b60394148508cbb9cacaa3cfc | 16,202 |
import math
def get_step(a, b, marks=1):
"""Return a coordinate set between ``a`` and ``b``.
This function returns a coordinate point between the two provided
coordinates. It does this by determining the angle of the path
between the two points and getting the sine and cosine from that
angle. The returned coordinate will be ``marks`` away from ``a``.
It is worth noting that if the distance between the two points,
calculated by ``get_distance``, is less than the value of ``marks``,
then a copy of ``b`` is returned.
Args:
a (list): A tuple is also acceptable. This list will have two
items, either ``int``s or ``float``s.
b (list): Exactly the same requirements as ``a``. It can (and
usually will be) a different coordinate.
marks (:obj:`int`, optional): One mark is the measurement
between two adjacent coordinates. To step over a greater
number of coordinates, increase the number of ``marks``.
Returns:
tuple: The returned tuple is a new coordinate set. The location
of the coordinates is determined by ``marks`` and angle
connecting ``a`` and ``b``.
"""
if get_distance(a, b) <= marks:
return b[:]
angle = math.atan2(
-(a[1] - b[1]),
-(a[0] - b[0]),
)
return (
(math.cos(angle) * marks) + a[0],
(math.sin(angle) * marks) + a[1],
) | e242823df263f1cee28409ef3f984f9b3066dad5 | 16,203 |
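A worked example, assuming get_step above and its get_distance helper are in scope:

```python
step = get_step((0, 0), (3, 4), marks=1)
# The angle to (3, 4) has cos = 0.6 and sin = 0.8, so step is approximately (0.6, 0.8).
```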
import torch
import torch.nn as nn
def get_model_mask_neurons(model, layers):
"""
Defines a dictionary of type {layer: tensor} containing for each layer of a model, the binary mask representing
which neurons have a value of zero (all of its parameters are zero).
:param model: PyTorch model.
:param layers: Tuple of layers on which apply the threshold procedure. e.g. (nn.modules.Conv2d, nn.modules.Linear)
:return: Mask dictionary.
"""
mask = {}
for n_m, mo in model.named_modules():
if isinstance(mo, layers):
for n_p, p in mo.named_parameters():
name = "{}.{}".format(n_m, n_p)
if "weight" in n_p:
if isinstance(mo, nn.modules.Linear):
sum = torch.abs(p).sum(dim=1)
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
elif isinstance(mo, nn.modules.Conv2d):
sum = torch.abs(p).sum(dim=(1, 2, 3))
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
elif isinstance(mo, nn.modules.ConvTranspose2d):
sum = torch.abs(p).sum(dim=(0, 2, 3))
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
else:
mask[name] = torch.where(p == 0, torch.zeros_like(p), torch.ones_like(p))
else:
mask[name] = torch.where(p == 0, torch.zeros_like(p), torch.ones_like(p))
return mask | 2e24af14d05802bac69b65a225ce284b5a7785e7 | 16,204 |
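A small sanity-check sketch, assuming the function above is in scope; it zeroes one output neuron of a toy linear layer and inspects the resulting mask:

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
with torch.no_grad():
    model[0].weight[0].zero_()  # kill all incoming weights of neuron 0
    model[0].bias[0] = 0.0      # and its bias

mask = get_model_mask_neurons(model, (nn.Linear,))
print(mask["0.weight"])  # shape (3,), with a 0 at index 0 and (typically) 1 elsewhere
```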
def connection():
"""Open a new connection or return the cached existing one"""
try:
existing_connection = GLOBAL_CACHE[CACHE_KEY_CONNECTION]
except KeyError:
new_connection = win32com.client.Dispatch(ADO_CONNECTION)
new_connection.Provider = CONNECTION_PROVIDER
new_connection.Open(CONNECTION_TARGET)
return GLOBAL_CACHE.setdefault(CACHE_KEY_CONNECTION, new_connection)
#
if not existing_connection.state:
# Reopen the connection if necessary
existing_connection.Open(CONNECTION_TARGET)
#
return existing_connection | f2c09fac89e0b0c9f9894869bb559ce61bca942a | 16,205 |
def precision(theta,X,Y):
"""
accuracy function
computes the accuracy of the logistic model theta on X with true target variable Y
"""
m = np.shape(X)[0]
H = sigmoid(np.dot(X,theta))
H[H >= 0.5] = 1
H[H < 0.5] = 0
return np.sum(H == Y)/m | e3b2c1c613f5ae2f20b2b9a8e6e343348be845df | 16,207 |
def get_converter(obj, coords=None, dims=None, chains=None):
"""Get the converter to transform a supported object to an xarray dataset.
This function sends `obj` to the right conversion function. It is idempotent,
in that it will return xarray.Datasets unchanged.
Parameters
----------
obj : A dict, or an object from PyStan or PyMC3 to convert
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict[str, Tuple(str)]
A mapping from pymc3 variables to a tuple corresponding to
the shape of the variable, where the elements of the tuples are
the names of the coordinate dimensions.
chains : int or None
The number of chains sampled from the posterior, only necessary for
converting dicts.
Returns
-------
xarray.Dataset
The coordinates are those passed in and ('chain', 'draw')
"""
if isinstance(obj, dict):
return DictToXarray(obj, coords, dims, chains=chains)
elif obj.__class__.__name__ == 'StanFit4Model': # ugly, but doesn't make PyStan a requirement
return PyStanToXarray(obj, coords, dims)
elif obj.__class__.__name__ == 'MultiTrace': # ugly, but doesn't make PyMC3 a requirement
return PyMC3ToXarray(obj, coords, dims)
else:
raise TypeError('Can only convert PyStan or PyMC3 object to xarray, not {}'.format(
obj.__class__.__name__)) | ee293672d74de5f0e1de0ff25c806fa10327c71c | 16,208 |
import pytz
def timezone_by_tzvar(tzvar):
"""Convert a WWTS tzvar to a tzdata timezone"""
return pytz.timezone(city_by_tzvar(tzvar)) | 0bc4d634ca5fcc55ceed062ae06fbe2eefb6c11a | 16,209 |
def easy_map(parser, token):
"""
The syntax:
{% easy_map <address> [<width> <height>] [<zoom>] [using <template_name>] %}
The "address" parameter can be an Address instance or a string describing it.
If an address is not found a new entry is created in the database.
"""
width, height, zoom, template_name = None, None, None, None
params = token.split_contents()
# pop the template name
if params[-2] == 'using':
template_name = params[-1]
params = params[:-2]
if len(params) < 2:
raise template.TemplateSyntaxError('easy_map tag requires address argument')
address = params[1]
if len(params) == 4:
width, height = params[2], params[3]
elif len(params) == 5:
width, height, zoom = params[2], params[3], params[4]
elif len(params) == 3 or len(params) > 5:
raise template.TemplateSyntaxError('easy_map tag has the following syntax: '
'{% easy_map <address> <width> <height> [zoom] [using <template_name>] %}')
return EasyMapNode(address, width, height, zoom, template_name) | b2968f6ff3cde324711f84a5b449fbab92cc22fa | 16,211 |
import six
def pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields):
"""
Args:
        name_prefixs: A prefix string or a list of prefix strings.
origin_datas: Data list or a list of data lists.
paddings: A padding id or a list of padding ids.
input_fields: A list of input fields dict.
Returns: A dict for while loop.
"""
data = dict()
data["feed_dict"] = dict()
def map_fn(n, d, p):
# n: name prefix
# d: data list
# p: padding symbol
data[concat_name(n, Constants.IDS_NAME)] = d
n_samples = len(d)
n_devices = len(input_fields)
n_samples_per_gpu = n_samples // n_devices
if n_samples % n_devices > 0:
n_samples_per_gpu += 1
def _feed_batchs(_start_idx, _inpf):
if _start_idx * n_samples_per_gpu >= n_samples:
return 0
x, x_len = padding_batch_data(
d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p)
data["feed_dict"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x
data["feed_dict"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len
return len(x_len)
parallels = repeat_n_times(
n_devices, _feed_batchs,
list(range(n_devices)), input_fields)
data["feed_dict"]["parallels"] = parallels
if isinstance(name_prefixs, six.string_types):
map_fn(name_prefixs, origin_datas, paddings)
else:
[map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)]
return data | 2946a8869cac26737f6c5b6234ce0320cfdf5bcf | 16,212 |
def get_session_maker():
"""
Return an sqlalchemy sessionmaker object using an engine from get_engine().
"""
return sessionmaker(bind=get_engine()) | 2f1a500cf799910f98e7821582cb78d063eeb273 | 16,213 |
def rescale_intensity(arr, in_range, out_range):
""" Return arr after stretching or shrinking its intensity levels.
Parameters
----------
arr: array
input array.
in_range, out_range: 2-tuple
min and max intensity values of input and output arr.
Returns
-------
out: array
array after rescaling its intensity.
"""
imin, imax = in_range
omin, omax = out_range
out = np.clip(arr, imin, imax)
out = (out - imin) / float(imax - imin)
return out * (omax - omin) + omin | 580c789a6eb2ad03bcbdefd8e5f27b0c6a239f32 | 16,214 |
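A quick numeric example, assuming the function above and numpy as np:

```python
import numpy as np

arr = np.array([0.0, 64.0, 128.0, 255.0])
out = rescale_intensity(arr, in_range=(0, 255), out_range=(0.0, 1.0))
# out is approximately [0.0, 0.251, 0.502, 1.0]
```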
import requests
def call_oai_api(resumption_token):
"""
Request page of data from the Argitrop OAI API
Parameters
----------
resumption_token : object (first page) or string or xml.etree.ElementTree.Element
token returned by previous request.
Returns
-------
response_xml : string
Response text as XML string
resumption_token : xml.etree.ElementTree.Element
tocken for requesting the next page
"""
oai_api_url = cfg.OAI_ENDPOINT_START % cfg.OAI_DATASET_NAME
if isinstance(resumption_token, ET.Element):
oai_api_url = cfg.OAI_ENDPOINT_CONTINUE % resumption_token.text
if isinstance(resumption_token, str):
oai_api_url = cfg.OAI_ENDPOINT_CONTINUE % resumption_token
headers = {'User-Agent': '%s' % cfg.USER_AGENT }
logger.info('Calling OAI API: %s', oai_api_url)
response = requests.get(oai_api_url, verify=True, headers=headers)
response_xml = ET.fromstring(response.text)
resumption_token = response_xml.find('oai:ListRecords', cfg.OAI_NS).find('oai:resumptionToken', cfg.OAI_NS)
return response_xml, resumption_token | e69ec11f75676a94134f4541b421391367ab1e3c | 16,216 |
import yaml
def save_pano_config(p):
"""
saves a panorama config file to the local disk from the session vars.
:return:
"""
filename = get_filename(p)
with open(filename, 'w') as yml_fh:
yml_fh.write(yaml.dump(session[p + '_config'], default_flow_style=False))
return redirect("/export") | 6a2575af4fe54caed7ce812d3fc2a876424912f7 | 16,217 |
def is_transport(name):
"""Test if all parts of a name are transport coefficients
For example, efe_GB, chie_GB_div_efi_GB are all composed of transport
coefficients, but gam_GB and chiee_GB_plus_gam_GB are not.
"""
transport = True
try:
for part_name in extract_part_names(split_parts(name)):
transport &= split_name(part_name)[0] in heat_vars + particle_vars + momentum_vars
except ValueError:
transport = False
return transport | 1aea3915680b3c74422cbd7648fd920719dd3cc8 | 16,219 |
def detect_moved_files(file_manifest, diff):
""" Detect files that have been moved """
previous_hashes = defaultdict(set)
for item in file_manifest['files']: previous_hashes[item['hash']].add(item['path'])
diff_dict = make_dict(diff)
# files with duplicate hashes are assumed to have the same contents
moved_files = {}
not_found = []
for val in diff:
if val['status'] == 'new' and val['hash'] in previous_hashes:
found = None; prev_filtered = []
for itm in previous_hashes[val['hash']]:
if itm.split('/')[-1] == val['path'].split('/')[-1]: found = itm; break
if found != None and found in diff_dict and diff_dict[found]['status'] == 'delete':
previous_hashes[val['hash']].remove(found)
moved_files[val['path']] = {'from' : found, 'to' : val['path']}
else: not_found.append(val)
    # At this point all duplicate items which have been moved but which retain the original name
    # have been removed from their relevant set. Remaining items are assigned on an ad-hoc basis.
    # As their hashes are the same, their contents are assumed to be the same, so mis-assignments
    # are not very important.
for val in not_found:
itm = previous_hashes[val['hash']].pop()
if itm in diff_dict and diff_dict[itm]['status'] == 'delete':
moved_files[val['path']] = {'from' : itm, 'to' : val['path']}
# Replace separate 'new' and 'delete' with a single 'moved' command.
for key, value in moved_files.iteritems():
moved_from = diff_dict.pop(value['from']) # remove the delete from the diff
moved_to = diff_dict[value['to']]
diff_dict[value['to']] = moved_from # start with where the file was moved from
diff_dict[value['to']]['status'] = 'moved'
diff_dict[value['to']]['moved_from'] = value['from']
diff_dict[value['to']]['path'] = moved_to['path'] # Copy the moved path
diff_dict[value['to']]['created'] = moved_to['created'] # Copy 'created' from the moved file
diff_dict[value['to']]['last_mod'] = moved_to['last_mod'] # Copy last_mod from the moved file
return [change for p, change in diff_dict.iteritems()] | db97dfb88d4fa253351e149dacf68a9fa3043072 | 16,220 |
def decodecaps(blob):
"""decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
Capabilities may have values using a line of the form::
capability=value1,value2,value3
The values are always a list."""
caps = {}
for line in blob.splitlines():
if not line:
continue
if b'=' not in line:
key, vals = line, ()
else:
key, vals = line.split(b'=', 1)
vals = vals.split(b',')
key = urlreq.unquote(key)
vals = [urlreq.unquote(v) for v in vals]
caps[key] = vals
return caps | 3c18bbe6b4b6a0562719d4992d6937d60f6bc114 | 16,221 |
def an(pos=5):
"""
    Text alignment.
    @pos:
        1: Bottom left
        2: Bottom center
        3: Bottom right
        4: Middle left
        5: Middle center
        6: Middle right
        7: Top left
        8: Top center
        9: Top right
apos = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
if pos not in apos:
        raise ValueError('\n\nan(pos):\n<pos> only accepts the '
                         'following values: ' + str(apos))
else:
return '\\an{:d}'.format(pos) | fbe1e89282ebdf7b4977bee295e2cac7735bd652 | 16,223 |
def main(iterator):
"""
Given a line iterator of the bash file, returns a dictionary of
keys to values
"""
values = {}
for line in iterator:
if not line.startswith('#') and len(line.strip()) > 0:
match_obj = line_regex.search(line)
if match_obj is not None:
key, value = match_obj.group(1), match_obj.group(2)
values[key] = try_parse(value)
return values | 16cc188b367200c317119348d9440d57faa322a9 | 16,225 |
def is_any(typeref: irast.TypeRef) -> bool:
"""Return True if *typeref* describes the ``anytype`` generic type."""
return isinstance(typeref, irast.AnyTypeRef) | 75ca055529fea35dfeb2519c3de61bf3739ce1f7 | 16,226 |
from typing import Union
def repeat_1d(inputs: tf.Tensor, count: Union[tf.Tensor, int], name="repeat_1d"):
"""Repeats each element of `inputs` `count` times in a row.
'''python
repeat_1d(tf.range(4), 2) -> 0, 0, 1, 1, 2, 2, 3, 3
'''
Parameters:
inputs: A 1D tensor with shape [`size`] to be repeated.
count: An integer, used to specify the number of time elements of `inputs` are repeated.
name: An optional string to specify the `name_scope` of this operation.
Returns:
A 1D tensor with shape [`size` * `count`] and same type as `inputs`.
"""
with tf.name_scope(name):
outputs = tf.expand_dims(inputs, 1)
outputs = tf.tile(outputs, [1, count])
outputs = tf.reshape(outputs, [-1])
return outputs | 44a8bb29dcd2ba0e2e5970aff1eab94b85a34c13 | 16,227 |
import logging
def create_logger(logfile=r"/tmp/tomoproc.log"):
"""Default logger for exception tracking"""
logger = logging.getLogger("tomoproc_logger")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(logfile)
fh.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
)
# add handler to logger object
logger.addHandler(fh)
return logger | a0c005c39af9d24d7198790cf0cfe31a1b6395a0 | 16,228 |
async def async_setup(opp: OpenPeerPower, config: ConfigType) -> bool:
"""Set up the Twente Milieu components."""
async def update(call) -> None:
"""Service call to manually update the data."""
unique_id = call.data.get(CONF_ID)
await _update_twentemilieu(opp, unique_id)
opp.services.async_register(DOMAIN, SERVICE_UPDATE, update, schema=SERVICE_SCHEMA)
return True | f2c0dd14e9193b9fa3ae3ea87689e90d9eb2c1bc | 16,229 |
import re
def parsePDCfile(fpath='data/CPTAC2_Breast_Prospective_Collection_BI_Proteome.tmt10.tsv'):
"""
    Takes a PDC file ending in .tmt10.tsv or .itraq.tsv and creates a
    tidied data frame with Gene, NCBIGeneID, Patient and logratio values
Parameters
----------
fpath : chr, optional
DESCRIPTION. The default is 'data/CPTAC2_Breast_Prospective_Collection_BI_Proteome.tmt10.tsv'.
    Return
    -------
    tdat : pd.DataFrame
        Tidied (long-format) data frame with Gene, NCBIGeneID, Patient and logratio columns.
"""
dat = pd.read_csv(fpath, sep='\t')
newdat = dat[['Gene', 'NCBIGeneID']]
#retrieve log ratios
    pat = re.compile(r'.*[0-9]+\ Log Ratio')
pats = list(filter(pat.match, dat.keys()))
for pat in pats:
up_pat = pat.replace(' Log Ratio', '')
newdat[up_pat] = dat[pat]
#now tidy data by log ratio by patient
tdat = pd.melt(newdat, id_vars=['Gene', 'NCBIGeneID'],\
var_name='Patient', value_name='logratio')
return tdat | 48b421d965e9b7f337a1f58c3665643eba514a7c | 16,230 |
def ave(x):
"""
Returns the average value of a list.
:param x: a given list
:return: the average of param x
"""
return np.mean(x) | ad7737321d9f0fc8461129b0153f40da2d75dc70 | 16,231 |
def information_gain(f1, f2):
"""
This function calculates the information gain, where ig(f1,f2) = H(f1) - H(f1|f2)
Input
-----
f1: {numpy array}, shape (n_samples,)
f2: {numpy array}, shape (n_samples,)
Output
------
ig: {float}
"""
ig = entropyd(f1) - conditional_entropy(f1, f2)
return ig | 39c60bf6a9fbf18f4d5ba3af609fed53771bd817 | 16,232 |
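The entropyd and conditional_entropy helpers are assumed to be defined elsewhere; the self-contained sketch below illustrates the same identity ig(f1, f2) = H(f1) - H(f1|f2) for discrete arrays, using base-2 logs (the helpers' base may differ):

```python
import numpy as np

def _entropy(labels):
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

def _conditional_entropy(f1, f2):
    # H(f1 | f2) = sum over y of p(f2 = y) * H(f1 restricted to f2 = y)
    f1, f2 = np.asarray(f1), np.asarray(f2)
    values, counts = np.unique(f2, return_counts=True)
    return sum((c / len(f2)) * _entropy(f1[f2 == y]) for y, c in zip(values, counts))

f1 = np.array([0, 0, 1, 1])
f2 = np.array([0, 0, 1, 1])
ig = _entropy(f1) - _conditional_entropy(f1, f2)
print(ig)  # 1.0 bit: f2 fully determines f1
```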
def subsequent_chunk_mask(
size: int,
chunk_size: int,
num_left_chunks: int=-1, ) -> paddle.Tensor:
"""Create mask for subsequent steps (size, size) with chunk size,
this is for streaming encoder
Args:
size (int): size of mask
chunk_size (int): size of chunk
num_left_chunks (int): number of left chunks
<0: use full chunk
>=0: use num_left_chunks
Returns:
paddle.Tensor: mask, [size, size]
Examples:
>>> subsequent_chunk_mask(4, 2)
[[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1]]
"""
ret = paddle.zeros([size, size], dtype=paddle.bool)
for i in range(size):
if num_left_chunks < 0:
start = 0
else:
start = max(0, (i // chunk_size - num_left_chunks) * chunk_size)
ending = min(size, (i // chunk_size + 1) * chunk_size)
ret[i, start:ending] = True
return ret | 512def08ef2fe35cdd80ba7eb92f30b73aef1782 | 16,234 |
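An illustration with a limited left context, assuming PaddlePaddle is installed and the function above is in scope:

```python
mask = subsequent_chunk_mask(6, chunk_size=2, num_left_chunks=1)
# Row-wise the mask is:
# [1, 1, 0, 0, 0, 0]
# [1, 1, 0, 0, 0, 0]
# [1, 1, 1, 1, 0, 0]
# [1, 1, 1, 1, 0, 0]
# [0, 0, 1, 1, 1, 1]   <- only one chunk of left context is kept
# [0, 0, 1, 1, 1, 1]
```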
def top_tags(request):
"""
Shows a list of the most-used Tags.
Context::
object_list
The list of Tags
Template::
cab/top_tags.html
"""
return render_to_response('cab/top_tags.html',
{ 'object_list': Snippet.objects.top_items('tag', 20) },
context_instance=RequestContext(request)) | 07cf792fb3bd0ed5a1185986fb3154cb645b2a75 | 16,235 |
def check_integer_sign(value):
"""
    :param value: number to test
    :return: True if value is non-negative, False otherwise
"""
return value >= 0 | 0ab012b62bf7b12ecabea8d1a4538bb30e197e07 | 16,236 |
import torch
def masks_empty(sample, mask_names):
""" Tests whether a sample has any non-masked values """
return any(not torch.any(sample[name] != 0) for name in mask_names) | 4c13b123fe6f5a17c3cd2ee673c54de331af7b23 | 16,237 |
def quantize_factor(factor_data, quantiles=5, bins=None, by_group=False):
"""
Computes period wise factor quantiles.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for each period,
The factor quantile/bin that factor value belongs too, and (optionally) the group the
asset belongs to.
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Only one of 'quantiles' or 'bins' can be not-None
by_group : bool
If True, compute quantile buckets separately for each group.
Returns
-------
factor_quantile : pd.Series
Factor quantiles indexed by date and asset.
"""
def quantile_calc(x, _quantiles, _bins):
if _quantiles is not None:
return pd.qcut(x, _quantiles, labels=False) + 1
elif _bins is not None:
return pd.cut(x, _bins, labels=False) + 1
raise ValueError('quantiles or bins should be provided')
grouper = [factor_data.index.get_level_values('date')]
if by_group:
grouper.append('group')
factor_quantile = factor_data.groupby(grouper)['factor'].apply(quantile_calc, quantiles, bins)
factor_quantile.name = 'factor_quantile'
return factor_quantile.dropna() | 1b51b84e9f22a1b0e0c2bb578a2011c1b8f725e2 | 16,238 |
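A minimal usage sketch, assuming the function above plus pandas and numpy; the data are random and purely illustrative:

```python
import numpy as np
import pandas as pd

dates = pd.to_datetime(["2020-01-01", "2020-01-02"])
assets = ["AAA", "BBB", "CCC", "DDD"]
index = pd.MultiIndex.from_product([dates, assets], names=["date", "asset"])
factor_data = pd.DataFrame({"factor": np.random.default_rng(0).standard_normal(len(index))},
                           index=index)

labels = quantize_factor(factor_data, quantiles=2)
# Each asset is assigned quantile 1 or 2 within its date.
```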
def listSplit(aList, n):
"""将一个列表以n个元素为一个单元进行均分,返回嵌套列表"""
return [aList[i:i+n] for i in range(0,len(aList),n)] | 936d4ff5b3bbbc39c57c01dc6a12e42b7dc6e0de | 16,240 |
import json
def refs(request):
""" Настройка назначения анализов вместе """
if request.method == "GET":
rows = []
fraction = directory.Fractions.objects.get(pk=int(request.GET["pk"]))
for r in directory.References.objects.filter(fraction=fraction).order_by("pk"):
rows.append(
{
'pk': r.pk,
'title': r.title,
'about': r.about,
'ref_m': json.loads(r.ref_m) if isinstance(r.ref_m, str) else r.ref_m,
'ref_f': json.loads(r.ref_f) if isinstance(r.ref_f, str) else r.ref_f,
'del': False,
'hide': False,
'isdefault': r.pk == fraction.default_ref_id,
}
)
return JsonResponse(rows, safe=False)
elif request.method == "POST":
pk = int(request.POST["pk"])
default = int(request.POST["default"])
if pk > -1:
fraction = directory.Fractions.objects.get(pk=pk)
for r in json.loads(request.POST["refs"]):
r["ref_m"].pop("", None)
r["ref_f"].pop("", None)
if r["del"] and r["pk"] != -1:
directory.References.objects.filter(pk=r["pk"]).delete()
if r["pk"] == default:
default = -1
elif not r["del"] and r["pk"] == -1:
nrf = directory.References(title=r["title"], about=r["about"], ref_m=r["ref_m"], ref_f=r["ref_f"], fraction=fraction)
nrf.save()
if r["isdefault"]:
default = nrf.pk
else:
row = directory.References.objects.get(pk=r["pk"])
row.title = r["title"]
row.about = r["about"]
row.ref_m = json.dumps(r["ref_m"])
row.ref_f = json.dumps(r["ref_f"])
row.save()
fraction.default_ref = None if default == -1 else directory.References.objects.get(pk=default)
fraction.save()
return JsonResponse({"ok": True}) | 7635525efbdab8e22c21019f8de9cc74a83c9c2a | 16,241 |
def transform(data):
"""replace the data value in the sheet if it is zero
:param data: data set
:return: data set without zero
"""
data_transformed = data.applymap(zero2minimum)
return data_transformed | b717c6f42c8f0ae0c68c97647a33e77aec2f1508 | 16,242 |
def findChildren(node, name):
"""Returns all the children of input node, with a matching name.
Arguments:
node (dagNode): The input node to search
name (str): The name to search
Returns:
dagNode list: The children dagNodes
"""
return __findChildren(node, name, False) | 9258dac1261e24d3cc5e58030147ce693fbd0356 | 16,243 |
def get_process_rss(force_update=False, pid=None):
"""
<Purpose>
Returns the Resident Set Size of a process. By default, this will
return the information cached by the last call to _get_proc_info_by_pid.
This call is used in get_process_cpu_time.
<Arguments>
force_update:
Allows the caller to force a data update, instead of using the cached data.
pid:
If force_update is True, this parameter must be specified to force the update.
<Exceptions>
See _get_proc_info_by_pid.
<Returns>
The RSS of the process in bytes.
"""
global last_proc_info_struct
# Check if an update is being forced
    if force_update and pid is not None:
# Update the info
_get_proc_info_by_pid(pid)
# Get RSS
rss_pages = last_proc_info_struct.ki_rssize
rss_bytes = rss_pages * PAGE_SIZE
return rss_bytes | 99c1c3fd35db4bb22c2c37aba48ddb7049ec26fa | 16,244 |
from typing import Dict
def doc_to_dict(doc) -> Dict:
"""Takes whatever the mongo doc is and turns into json serializable dict"""
ret = {k: stringify_mongovalues(v) for k, v in doc.items() if k != "_id"}
ret["_id"] = str(doc["_id"])
return ret | 9e3f72568cf25ac864c1add2989c8e1cb064661d | 16,245 |
def add_srv_2cluster(cluster_name, srvjson):
"""
    Add a service to the database
    :param cluster_name: name of the target cluster
    :param srvjson: dict containing 'host_name' and 'service_name'
    :return: response dict and HTTP status code
"""
status = ''
message = ''
resp = {"status": status, "message": message}
host_name = srvjson.get('host_name')
service_name = srvjson.get('service_name')
sfo_clu_node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
if not sfo_clu_node:
raise ValueError('Not Found Node Host %s' % host_name)
swift_op = SwiftServiceOperation(sfo_clu_node.node_inet_ip)
try:
content = swift_op.install_service(service_name)
    except Exception as error:
status = 501
message = str(error)
else:
status = 200
message = content
resp.update({"status": status, "message": message})
return resp, status | 2e3c6ec6a312016785affbc71c5c2f178a0ecd84 | 16,246 |
def _add_left_zeros(number, iteration_digits):
"""Add zeros to the left side of the experiment run number.
    Zeros are added to pad the number on the left until it is iteration_digits
    characters long.
"""
number = str(number)
return f'{"0" * (iteration_digits - len(number))}{number}' | e3f86a7e7f276ceff4eb662a3f5bc364b4d10ea3 | 16,247 |
def sharpdiff(y_true, y_pred):
"""
@param y_true: tensor of shape (batch_size, height, width, channels)
@param y_pred: tensor of shape (batch_size, height, width, channels)
@return: the sharpness difference as a scalar
"""
    def log10(tensor):
        numerator = tf.math.log(tensor)
        denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
        return numerator / denominator
    shape = tf.shape(y_pred)
    # number of pixels per image: height * width * channels
    num_pixels = tf.cast(shape[1] * shape[2] * shape[3], tf.float32)
    y_true_dy, y_true_dx = tf.image.image_gradients(y_true)
    y_pred_dy, y_pred_dx = tf.image.image_gradients(y_pred)
    pred_grad_sum = y_pred_dx + y_pred_dy
    true_grad_sum = y_true_dx + y_true_dy
    grad_diff = tf.abs(true_grad_sum - pred_grad_sum)
    grad_diff_red = tf.reduce_sum(grad_diff, [1, 2, 3])
    # PSNR-style score of the gradient difference, averaged over the batch
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * grad_diff_red))
    return tf.reduce_mean(batch_errors) | 0c08541fd5c551c5a2ca1afb598adfc627c06286 | 16,248 |
import logging
def admin_setfriend():
""" Set the friend state of a user """
uid = request.args.get("uid", "")
state = request.args.get("state", "1") # Default: set as friend
try:
state = bool(int(state))
except Exception:
return (
"<html><body><p>Invalid state string: '{0}'</p></body></html>"
.format(state)
)
u = User.load_if_exists(uid) if uid else None
if u is None:
return "<html><body><p>Unknown user id '{0}'</p></body></html>".format(uid)
was_friend = u.friend()
u.set_friend(state)
u.set_has_paid(state)
u.update()
logging.info("Friend state of user {0} manually set to {1}".format(uid, state))
return (
"<html><body><p>User '{0}': friend state was '{2}', set to '{1}'</p></body></html>"
.format(uid, state, was_friend)
) | f4b3a04b18735320968513666ad5901a68e5a492 | 16,249 |
def LF_CG_BICLUSTER_BINDS(c):
"""
    This label function uses the bicluster data from "A global network of biomedical relationships".
"""
sen_pos = c.get_parent().position
pubmed_id = c.get_parent().document.name
query = bicluster_dep_df.query("pubmed_id==@pubmed_id&sentence_num==@sen_pos")
if not(query.empty):
if query["B"].sum() > 0.0:
return 1
return 0 | 29aeb5af69257a9c762bccc45c09e68d0799174c | 16,250 |
from typing import List
import random
def single_point_crossover(parents: List[Chromosome], probability: float = 0.7) -> List[Chromosome]:
""" Make the crossover of two parents to generate two child.
The crossover has a probability to be made.
The crossover point is random.
:param parents: selected parents
:param probability: probability that the crossover is made
:return: offspring
"""
cut_point = random.randint(1, len(parents[1].genes) - 1)
if random.random() < probability:
first_child = Chromosome(parents[0].genes[:cut_point] + parents[1].genes[cut_point:])
second_child = Chromosome(parents[1].genes[:cut_point] + parents[0].genes[cut_point:])
else:
first_child = Chromosome(parents[0].genes.copy())
second_child = Chromosome(parents[1].genes.copy())
return [first_child, second_child] | 4e8dd96fc42a8a1a1feb7c1c3dad42892e060425 | 16,252 |
def get_node_count(network=None, base_url=DEFAULT_BASE_URL):
"""Reports the number of nodes in the network.
Args:
network (SUID or str or None): Name or SUID of a network or view. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
int: count of nodes in network.
Raises:
ValueError: if server response has no JSON
CyError: if network name or SUID doesn't exist
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> get_node_count()
6
>>> get_node_count(52)
6
>>> get_node_count('galFiltered.sif')
6
"""
net_suid = get_network_suid(network, base_url=base_url)
res = commands.cyrest_get(f'networks/{net_suid}/nodes/count', base_url=base_url)
return res['count'] | c80e34443c4e39a96496eca5867333800b0208c5 | 16,253 |
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Optional
import re
def process_sample(
sample: Dict[str, Any],
relation_vocab: Dict[str, int],
spacy_model: Any,
tokenizer: Any,
) -> Tuple[Optional[Dict[str, Any]], Dict[str, int]]:
"""Processes WebRED sample and updates relation vocabulary.
To process a raw WebRED example, we first extract subj and obj and remove the
annotations from the text. The resulting text is parsed with a spacy model to
find mention spans, and then tokenized with a BERT tokenizer. If necessary, we
override some spacy mentions with the subj and obj WebRED mentions.
Args:
sample: raw WebRED sample. Needs to contain following fields: token, list of
token strings. relation, string describing relation between subj and obj.
relation_vocab: dictionary mapping relation strings to integer labels.
spacy_model: spacy model used to detect mentions.
tokenizer: BERT tokenizer.
Returns:
Processed WebRED sample and updated relation vocabulary.
"""
processed_sample = {}
if sample['num_pos_raters'] < 2:
relation = NO_RELATION
else:
relation = sample['relation']
if relation not in relation_vocab:
relation_vocab[relation] = len(relation_vocab)
label = relation_vocab[relation]
processed_sample['target'] = [label]
text = sample['annotated_text']
# Remove subj and obj annotations from text and store position
def find_span(input_text: str, pattern: Any,
prefix_len: int) -> Tuple[int, int]:
"""Find span corresponding to actual subj or obj strings."""
match = pattern.search(input_text)
span_start = match.start() + prefix_len + 1
# We want inclusive spans, hence -2 instead of -1
span_end = match.end() - 2
return (span_start, span_end)
def replace_and_adjust(
input_text: str, match: Any, prefix_len: int,
inverted_mapping: np.ndarray) -> Tuple[str, np.ndarray]:
"""Remove subj/obj annotations and adjust token mapping accordingly."""
original_span_start = match.start() + prefix_len + 1
original_span_end = match.end() - 1
actual_string = input_text[original_span_start:original_span_end]
new_text = input_text[:match.start()] + actual_string + input_text[match
.end():]
# Inverted mapping maps from remaining tokens to positions in original text
new_inverted_mapping = np.zeros(len(new_text), dtype=np.int32)
new_inverted_mapping[:match.start()] = inverted_mapping[:match.start()]
new_span_start = match.start()
new_span_end = match.start() + len(actual_string)
new_inverted_mapping[new_span_start:new_span_end] = inverted_mapping[
original_span_start:original_span_end]
new_inverted_mapping[new_span_end:] = inverted_mapping[original_span_end +
1:]
return new_text, new_inverted_mapping
inverted_mapping = np.arange(len(text))
subj_pattern = re.compile('SUBJ{[^}]+}')
subj_span = find_span(text, subj_pattern, len('SUBJ'))
obj_pattern = re.compile('OBJ{[^}]+}')
obj_span = find_span(text, obj_pattern, len('OBJ'))
# Remove subj/obj annotations from text
while True:
subj_match = subj_pattern.search(text)
if subj_match is None:
break
text, inverted_mapping = replace_and_adjust(text, subj_match, len('SUBJ'),
inverted_mapping)
while True:
obj_match = obj_pattern.search(text)
if obj_match is None:
break
text, inverted_mapping = replace_and_adjust(text, obj_match, len('OBJ'),
inverted_mapping)
# Adjust spans for removed tokens
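  # mapping[i] gives the position of original character i in the cleaned text (-1 if that character was removed)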
mapping = np.zeros(len(sample['annotated_text']), dtype=np.int32) - 1
mapping[inverted_mapping] = np.arange(len(inverted_mapping))
subj_span = (mapping[subj_span[0]], mapping[subj_span[1]])
assert subj_span[0] != -1 and subj_span[1] != -1
obj_span = (mapping[obj_span[0]], mapping[obj_span[1]])
assert obj_span[0] != -1 and obj_span[1] != -1
parsed_text = spacy_model(text)
# We use spacy to parse text, identify noun chunks
mention_char_spans = []
mention_char_spans.append(subj_span)
mention_char_spans.append(obj_span)
def overlaps(first_span: Tuple[int, int], second_span: Tuple[int,
int]) -> bool:
def point_inside_span(point: int, span: Tuple[int, int]) -> bool:
      return span[0] <= point <= span[1]
spans_overlap = (
point_inside_span(first_span[0], second_span) or
point_inside_span(first_span[1], second_span) or
point_inside_span(second_span[0], first_span) or
point_inside_span(second_span[1], first_span))
return spans_overlap
for chunk in parsed_text.noun_chunks:
span_start_char = parsed_text[chunk.start].idx
span_last_token = parsed_text[chunk.end - 1]
span_end_char = span_last_token.idx + len(span_last_token.text) - 1
char_span = (span_start_char, span_end_char)
# Append only if does not overlap with subj or obj spans. In case spacy
# mention annotation disagrees with tacred annotation, we want to favor
# tacred.
if not overlaps(char_span, subj_span) and not overlaps(char_span, obj_span):
mention_char_spans.append(char_span)
# Sort spans by start char
start_chars = np.array([span[0] for span in mention_char_spans])
sorted_indices = np.argsort(start_chars)
sorted_positions = np.zeros_like(start_chars)
sorted_positions[sorted_indices] = np.arange(len(sorted_positions))
sorted_spans = [mention_char_spans[idx] for idx in sorted_indices]
# Tokenize and get aligned mention positions
_, text_ids, text_mask, mention_spans, span_indices = tokenization_utils.tokenize_with_mention_spans(
tokenizer=tokenizer,
sentence=text,
spans=sorted_spans,
max_length=FLAGS.max_length,
add_bert_tokens=True,
allow_truncated_spans=True,
)
processed_sample['text_ids'] = text_ids
processed_sample['text_mask'] = text_mask
# Subj and obj are the first elements of mention spans.
subj_index = sorted_positions[0]
obj_index = sorted_positions[1]
# Some spans may be dropped by the BERT tokenizer. Here we map indices in the
# original list of spans to the one returned by the tokenizer.
reverse_span_indices = {
original_idx: tokenized_idx
for tokenized_idx, original_idx in enumerate(span_indices)
}
# Skip if subj or obj dropped.
if (subj_index not in reverse_span_indices or
obj_index not in reverse_span_indices):
return None, relation_vocab
subj_index = reverse_span_indices[subj_index]
obj_index = reverse_span_indices[obj_index]
# Make sure we don't discard subj or obj
assert max(subj_index, obj_index) < FLAGS.max_mentions
processed_sample['subject_mention_indices'] = [subj_index]
processed_sample['object_mention_indices'] = [obj_index]
mention_spans = np.array(mention_spans)
mention_start_positions = mention_spans[:, 0]
mention_end_positions = mention_spans[:, 1]
mention_start_positions = mention_start_positions[:FLAGS.max_mentions]
mention_end_positions = mention_end_positions[:FLAGS.max_mentions]
mention_pad_shape = (0, FLAGS.max_mentions - len(mention_start_positions))
mention_mask = np.ones(len(mention_start_positions), dtype=np.int64)
mention_mask = np.pad(mention_mask, mention_pad_shape, mode='constant')
mention_start_positions = np.pad(
mention_start_positions, mention_pad_shape, mode='constant')
mention_end_positions = np.pad(
mention_end_positions, mention_pad_shape, mode='constant')
processed_sample['mention_start_positions'] = mention_start_positions
processed_sample['mention_end_positions'] = mention_end_positions
processed_sample['mention_mask'] = mention_mask
return processed_sample, relation_vocab | 74a80fb69fdebb35c86830f54344fc770ad91cd4 | 16,254 |
import tempfile
import boto3
def transform_s3(key, bucket="songsbuckettest"):
    """
    REMEMBER TO DO DEFENSIVE PROGRAMMING, WRAP IN TRY/CATCH
    """
s3 = boto3.client('s3')
# print("connection to s3 -- Test")
with tempfile.NamedTemporaryFile(mode='wb') as tmp:
s3.download_fileobj(bucket, key, tmp)
try:
return process_h5_file(tmp.name)
except Exception as e:
return [] | 3e7419185ab3c3581ea24227c204fd207b113b1e | 16,255 |
def get_active_users(URM, popular_threshold=100):
"""
Get the users with activity above a certain threshold
:param URM: URM on which users will be extracted
    :param popular_threshold: popularity threshold
:return:
"""
return _get_popular(URM, popular_threshold, axis=1) | 6b05e1a4288e00903ce9b396407c4e3547402710 | 16,256 |
def default_data_to_device(
input, target=None, device: str = "cuda", non_blocking: bool = True
):
"""Sends data output from a PyTorch Dataloader to the device."""
input = input.to(device=device, non_blocking=non_blocking)
if target is not None:
target = target.to(device=device, non_blocking=non_blocking)
return input, target | 8dafddbd52b54a576ddc67d7d79af4372fbd57dc | 16,257 |
def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
column_width, tab_spaces, patch_filename=None):
"""Helper function that returns objects for diff2 views"""
ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue.key)
if ps_left is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_left_id, status=404)
ps_left.issue_key = request.issue.key
ps_right = models.PatchSet.get_by_id(
int(ps_right_id), parent=request.issue.key)
if ps_right is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_right_id, status=404)
ps_right.issue_key = request.issue.key
if patch_id is not None:
patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right.key)
else:
patch_right = None
if patch_right is not None:
patch_right.patchset_key = ps_right.key
if patch_filename is None:
patch_filename = patch_right.filename
# Now find the corresponding patch in ps_left
patch_left = models.Patch.query(
models.Patch.filename == patch_filename,
ancestor=ps_left.key).get()
if patch_left:
try:
new_content_left = patch_left.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_left = new_content_left.lines
elif patch_right:
lines_left = patch_right.get_content().lines
else:
lines_left = []
if patch_right:
try:
new_content_right = patch_right.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_right = new_content_right.lines
elif patch_left:
lines_right = patch_left.get_content().lines
else:
lines_right = []
rows = engine.RenderDiff2TableRows(request,
lines_left, patch_left,
lines_right, patch_right,
context=context,
colwidth=column_width,
tabspaces=tab_spaces)
rows = list(rows)
if rows and rows[-1] is None:
del rows[-1]
return dict(patch_left=patch_left, patch_right=patch_right,
ps_left=ps_left, ps_right=ps_right, rows=rows) | 47aef66544acec7d57125f3c7c0f8edb385ba150 | 16,258 |
def vec_add(iter_a, iter_b):
"""element wise addition"""
if len(iter_a) != len(iter_b):
        raise ValueError("iter_a and iter_b must have the same length")
return (a + b for a, b in zip(iter_a, iter_b)) | f3e5bf50d61cfe518ee8b0eb838503a7f054baa8 | 16,259 |
def run():
""" Read inputs into a dictionary for recursive searching """
for line in inputs:
# Strip the trailing "." and split
container, rest = line[:-1].split(" contain ")
# Strip the trailing " bags"
container = container[:-5]
contained = []
for bag in rest.split(", "):
if bag[:2] != "no":
# Strip the leading number and the trailing "bags" or " bag"
contained.append(bag[2:-4].strip())
bags[container] = contained
return sum(1 if search(bag) else 0 for bag in bags) | c3b565efbb923562c13955d808cf6ac2f09b616b | 16,260 |
def _get_prolongation_coordinates(grid, d1, d2):
"""Calculate required coordinates of finer grid for prolongation."""
D2, D1 = np.broadcast_arrays(
getattr(grid, 'vectorN'+d2), getattr(grid, 'vectorN'+d1)[:, None])
return np.r_[D1.ravel('F'), D2.ravel('F')].reshape(-1, 2, order='F') | 6534c456413cd062f9c35c14f5d9b57b1aba6c12 | 16,261 |
def get_info(obj):
"""
get info from account obj
:type obj: account object
:param obj: the object of account
:return: dict of account info
"""
if obj:
return dict(db_instance_id=obj.dbinstance_id,
account_name=obj.account_name,
account_status=obj.account_status,
account_type=obj.account_type,
account_description=obj.account_description,
database_privileges=obj.database_privileges)
return {} | c654ab1bdb4b4bf20223172dae450e1e7e6a52b9 | 16,263 |
def vlookup(x0, vals, ind, approx=True):
"""
Equivalent to the spreadsheet VLOOKUP function
:param vals: array_like
2d array of values - first column is searched for index
    :param x0: value to look up in vals[0]
    :param ind: index of the row in vals from which to return the matched value
    :param approx: if True, return the closest match not exceeding x0 (vals[0]
        must be sorted ascending); if False, require an exact match
    :return: the matched value from vals[ind]
"""
if isinstance(vals[0][0], str):
x0 = str(x0)
if not approx: # need exact match
return vals[int(ind)][np.where(x0 == np.array(vals[0]))[0][0]]
else:
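        # Approximate match: index of the largest entry in vals[0] that does not exceed x0
        # (assumes vals[0] is sorted in ascending order).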
inds = np.searchsorted(vals[0], x0, side='right') - 1
return vals[ind][int(inds)] | 59ee6ecd7c001bf6cf3f03ad678d93eda33f5e21 | 16,264 |
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
"""
Compute 2x2 matrix mutiplication in vector way
C = A*B
C = [a00 a01] * [b00 b01] = [c00 c01]
[a10 a11] [b10 b11] [c10 c11]
"""
c00 = a00*b00 + a01*b10
c10 = a10*b00 + a11*b10
c01 = a00*b01 + a01*b11
c11 = a10*b01 + a11*b11
return c00, c10, c01, c11 | d34506cc8099cbbf8b7a9e1eb9d4d068d768ebac | 16,265 |
import collections
import random
def random_sample_with_weight_and_cost(population, weights, costs, cost_limit):
"""
Like random_sample_with_weight but with the addition of a cost and limit.
While performing random samples (with priority for higher weight) we'll keep track of cost
If cost exceeds the cost limit, we stop selecting
Basically the knapsack problem, but with deliberately random selection rather than dynamic optimization
"""
population_weights = {request: weight for (request, weight) in zip(population, weights)}
population_costs = {request: cost for (request, cost) in zip(population, costs)}
selected = []
not_selected = []
cost = 0
# Create a Counter from the population, assigning count by weight
counter = collections.Counter(population_weights)
while counter:
# Turn the Counter into a list for random selection from
# The list will have n repetitions of an element with weight n
choice = random.choice(list(counter.elements()))
choice_cost = population_costs[choice]
# If the cost would cause us to exceed our limit it shouldn't be selected
if cost + choice_cost > cost_limit:
not_selected.append(choice)
else:
cost += choice_cost
selected.append(choice)
# When chosen (whether selected or not), remove the element from the population
# Effectively removes all repetitions of the element
counter.pop(choice)
return selected, not_selected | 637afd1c0e83bbda879f41bd15feb0f65b238fb3 | 16,266 |
def hardnet68ds(pretrained=False, **kwargs):
""" # This docstring shows up in hub.help()
Harmonic DenseNet 68ds (Depthwise Separable) model
pretrained (bool): kwargs, load pretrained weights into the model
"""
# Call the model, load pretrained weights
model = hardnet.HarDNet(depth_wise=True, arch=68, pretrained=pretrained)
return model | 5167b79f8effdb9a4b94e9d0a7902f35468a1d8b | 16,267 |
def get_config():
"""Base config for training models."""
config = ml_collections.ConfigDict()
# How often to save the model checkpoint.
config.save_checkpoints_steps: int = 1000
# Frequency fo eval during training, e.g. every 1000 steps.
config.eval_frequency: int = 1000
# Total batch size for training.
config.train_batch_size: int = 32
# Total batch size for eval.
config.eval_batch_size: int = 8
# The base learning rate for Adam.
config.learning_rate: float = 1e-4
# Initial checkpoint directory (usually from a pre-trained model).
config.init_checkpoint_dir: str = ''
# Whether to lower case the input text. Should be True for uncased models and
# False for cased models.
config.do_lower_case: bool = True
# Model parameters.
# For pre-training, we only need 2 segment types (for NSP), but we allow up to
# 4 for GLUE/SuperGLUE fine-tuning.
config.type_vocab_size: int = 4
# Embedding dimension for each token.
config.d_emb: int = 768
# Hidden dimension of model.
config.d_model: int = 768
# Hidden dimension for feed-forward layer.
config.d_ff: int = 3072
# The maximum total input sequence length after tokenization. Sequences longer
# than this will be truncated, and sequences shorter than this will be padded.
config.max_seq_length: int = 512
# Number of self-attention heads. Only used for BERT models.
config.num_heads: int = 12
# Number of model blocks / layers.
config.num_layers: int = 12
# Regular dropout rate, applied throughout model.
config.dropout_rate: float = 0.1
# Dropout rate used in mixing module, e.g. self-attention sublayer.
config.mixing_dropout_rate: float = 0.1
# Determines how discrete Fourier Transforms are computed. Only used for FNet
# models. Set to true if running on TPU hardware, in which case matrix
# multiplications will be favored for relatively shorter input sequences. Set
# to false for GPU/CPU hardware, in which case FFTs are used for all input
# sequence lengths.
config.use_tpu_fourier_optimizations: bool = False
# Dummy parameter for repeated runs.
config.trial: int = 0
return config | 67dfe8aff3f1a3e660d9debccc181690ea561ae2 | 16,268 |
def slave_addresses(dns):
"""List of slave IP addresses
@returns: str Comma delimited list of slave IP addresses
"""
return ', '.join(['{}:53'.format(s['address'])
for s in dns.pool_config]) | e293442272496f02a58055dd778ecfe875124ccd | 16,269 |
def processAndLabelStates(role, states, reason, positiveStates=None, negativeStates=None, positiveStateLabelDict={}, negativeStateLabelDict={}):
"""Processes the states for an object and returns the appropriate state labels for both positive and negative states.
@param role: The role of the object to process states for (e.g. C{ROLE_CHECKBOX}.
@type role: int
@param states: The raw states for an object to process.
@type states: set
@param reason: The reason to process the states (e.g. C{REASON_FOCUS}.
@type reason: str
@param positiveStates: Used for C{REASON_CHANGE}, specifies states changed from negative to positive;
@type positiveStates: set
@param negativeStates: Used for C{REASON_CHANGE}, specifies states changed from positive to negative;
	@type negativeStates: set
@param positiveStateLabelDict: Dictionary containing state identifiers as keys and associated positive labels as their values.
@type positiveStateLabelDict: dict
@param negativeStateLabelDict: Dictionary containing state identifiers as keys and associated negative labels as their values.
@type negativeStateLabelDict: dict
@return: The labels of the relevant positive and negative states.
@rtype: [str, ...]
"""
mergedStateLabels=[]
positiveStates = processPositiveStates(role, states, reason, positiveStates)
negativeStates = processNegativeStates(role, states, reason, negativeStates)
for state in sorted(positiveStates | negativeStates):
if state in positiveStates:
mergedStateLabels.append(positiveStateLabelDict.get(state, stateLabels[state]))
elif state in negativeStates:
# Translators: Indicates that a particular state of an object is negated.
# Separate strings have now been defined for commonly negated states (e.g. not selected and not checked),
# but this still might be used in some other cases.
# %s will be replaced with the full identifier of the negated state (e.g. selected).
mergedStateLabels.append(negativeStateLabelDict.get(state, negativeStateLabels.get(state, _("not %s") % stateLabels[state])))
return mergedStateLabels | 23be0c7d943961f756a02abea98c51500f92b00f | 16,270 |
def shape_for_stateful_rnn(data, batch_size, seq_length, seq_step):
"""
Reformat our data vector into input and target sequences to feed into our
RNN. Tricky with stateful RNNs.
"""
# Our target sequences are simply one timestep ahead of our input sequences.
# e.g. with an input vector "wherefore"...
# targets: h e r e f o r e
# predicts ^ ^ ^ ^ ^ ^ ^ ^
# inputs: w h e r e f o r
inputs = data[:-1]
targets = data[1:]
# We split our long vectors into semi-redundant seq_length sequences
inputs = _create_sequences(inputs, seq_length, seq_step)
targets = _create_sequences(targets, seq_length, seq_step)
# Make sure our sequences line up across batches for stateful RNNs
inputs = _batch_sort_for_stateful_rnn(inputs, batch_size)
targets = _batch_sort_for_stateful_rnn(targets, batch_size)
# Our target data needs an extra axis to work with the sparse categorical
# crossentropy loss function
targets = targets[:, :, np.newaxis]
return inputs, targets | 431eb54acc9bfe2281a3a863335eb135f050f47e | 16,271 |
from time import time
from tqdm import tqdm
def setup_features(dataRaw, label='flux', notFeatures=[], pipeline=None, verbose=False, resample=False, returnAll=None):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
Args:
features (nD-array): Array of input raw features.
labels (1D-array): The second parameter.
pipeline (int): The first parameter.
label_scaler (str): The second parameter.
feature_scaler (str): The second parameter.
Returns:
features_transformed, labels_scaled
.. _PEP 484:
https://github.com/ExoWanderer/
"""
# if label in notFeatures: notFeatures.remove(label)
if isinstance(dataRaw,str):
        dataRaw = pd.read_csv(dataRaw)
elif isinstance(dataRaw, dict):
dataRaw = pd.DataFrame(dataRaw)
elif not isinstance(dataRaw, pd.DataFrame):
raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)')
# WHY IS THIS ALLOWED TO NOT HAVE PARENTHESES?
# assert isinstance(dataRaw, pd.DataFrame), 'The input must be a Pandas DataFrame or Dictionary with Equal Size Entries'
inputData = dataRaw.copy()
# PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
pixCols = [colname for colname in inputData.columns if 'pix' in colname.lower() or 'pld' in colname.lower()]
PLDnorm = np.sum(np.array(inputData[pixCols]),axis=1)
inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T
# # Overwrite the PLDpixels entries with the normalized version
# for key in dataRaw.columns:
# if key in PLDpixels.columns:
# inputData[key] = PLDpixels[key]
#
# Assign the labels
    n_PLD = len([key for key in dataRaw.keys() if 'err' not in key.lower() and ('pix' in key.lower() or 'pld' in key.lower())])
input_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' not in colname.lower()]
errors_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' in colname.lower()]
# resampling_inputs = ['flux', 'xpos', 'ypos', 'xfwhm', 'yfwhm', 'bg_flux', 'bmjd', 'np'] + ['pix{}'.format(k) for k in range(1,10)]
# resampling_errors = ['fluxerr', 'xerr', 'yerr', 'xerr', 'yerr', 'sigma_bg_flux', 'bmjd_err', 'np_err'] + ['fluxerr']*n_PLD
start = time()
if resample:
print("Resampling ", end=" ")
inputData = pd.DataFrame({colname:np.random.normal(dataRaw[colname], dataRaw[colerr]) \
for colname, colerr in tqdm(zip(input_labels, errors_labels), total=len(input_labels))
})
print("took {} seconds".format(time() - start))
else:
inputData = pd.DataFrame({colname:dataRaw[colname] for colname in input_labels})
labels = dataRaw[label].values
# explicitly remove the label
if label in inputData.columns: inputData.drop(label, axis=1, inplace=True)
feature_columns = [colname for colname in inputData.columns if colname not in notFeatures]
features = inputData[feature_columns].values
if verbose: print('Shape of Features Array is', features.shape)
if verbose: start = time()
# labels_scaled = labels# label_scaler.fit_transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features
if verbose: print('took {} seconds'.format(time() - start))
collection = features_trnsfrmd, labels
if returnAll == True:
collection = features_trnsfrmd, labels, pipeline
if returnAll == 'features':
collection = features_trnsfrmd
if returnAll == 'with raw data':
collection.append(dataRaw)
return collection | 7c1fb86dc66d97610bd1d22ef65ccb88e105dd92 | 16,272 |
from typing import List
from typing import Any
def plot_marginal_effects(model: ModelBridge, metric: str) -> AxPlotConfig:
"""
Calculates and plots the marginal effects -- the effect of changing one
factor away from the randomized distribution of the experiment and fixing it
at a particular level.
Args:
model: Model to use for estimating effects
metric: The metric for which to plot marginal effects.
Returns:
AxPlotConfig of the marginal effects
"""
plot_data, _, _ = get_plot_data(model, {}, {metric})
arm_dfs = []
for arm in plot_data.in_sample.values():
arm_df = pd.DataFrame(arm.parameters, index=[arm.name])
arm_df["mean"] = arm.y_hat[metric]
arm_df["sem"] = arm.se_hat[metric]
arm_dfs.append(arm_df)
    effect_table = marginal_effects(pd.concat(arm_dfs, axis=0))
varnames = effect_table["Name"].unique()
data: List[Any] = []
for varname in varnames:
var_df = effect_table[effect_table["Name"] == varname]
data += [
go.Bar(
x=var_df["Level"],
y=var_df["Beta"],
error_y={"type": "data", "array": var_df["SE"]},
name=varname,
)
]
fig = subplots.make_subplots(
cols=len(varnames),
rows=1,
subplot_titles=list(varnames),
print_grid=False,
shared_yaxes=True,
)
for idx, item in enumerate(data):
fig.append_trace(item, 1, idx + 1)
fig.layout.showlegend = False
# fig.layout.margin = go.layout.Margin(l=2, r=2)
fig.layout.title = "Marginal Effects by Factor"
fig.layout.yaxis = {
"title": "% better than experiment average",
"hoverformat": ".{}f".format(DECIMALS),
}
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC) | f68c72d54e4e8ff1011ae6daec8a00ab30069d78 | 16,273 |
def _client_row_class(client: dict) -> str:
"""
Set the row class depending on what's in the client record.
"""
required_cols = ['trust_balance', 'refresh_trigger']
for col in required_cols:
if col not in client:
return 'dark'
try:
if client['trust_balance'] > client['refresh_trigger']:
return 'success'
except TypeError:
return 'dark'
return 'danger' | cd5ebd8fd64c7d994d6803df473cd317af65e9ac | 16,274 |
def num2ord(place):
"""Return ordinal for the given place."""
omap = { u'1' : u'st',
u'2' : u'nd',
u'3' : u'rd',
u'11' : u'th',
u'12' : u'th',
u'13' : u'th' }
if place in omap:
return place + omap[place]
elif place.isdigit():
if len(place) > 1 and place[-1] in omap: # last digit 1,2,3
return place + omap[place[-1]]
else:
return place + u'th'
else:
return place | 3552257bba134ac00ed8c68d72bf5c947424b2e7 | 16,275 |
from typing import Type
def _get_dist_class(
policy: Policy, config: AlgorithmConfigDict, action_space: gym.spaces.Space
) -> Type[TFActionDistribution]:
"""Helper function to return a dist class based on config and action space.
Args:
policy: The policy for which to return the action
dist class.
config: The Algorithm's config dict.
action_space (gym.spaces.Space): The action space used.
Returns:
Type[TFActionDistribution]: A TF distribution class.
"""
if hasattr(policy, "dist_class") and policy.dist_class is not None:
return policy.dist_class
elif config["model"].get("custom_action_dist"):
action_dist_class, _ = ModelCatalog.get_action_dist(
action_space, config["model"], framework="tf"
)
return action_dist_class
elif isinstance(action_space, Discrete):
return Categorical
elif isinstance(action_space, Simplex):
return Dirichlet
else:
assert isinstance(action_space, Box)
if config["normalize_actions"]:
return SquashedGaussian if not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian | 08c09b876d5c2797d517a87957049c34939aee3a | 16,276 |
def expectation_values(times, states, operator):
"""expectation values of operator at times wrt states"""
def exp_value(state, operator, time):
if len(state.shape) == 2: #DensityMatrix
return np.trace(np.dot(state, operator(time)))
else: #StateVector
return np.vdot(state, np.dot(operator(time), state))
evs = np.ndarray(times.shape, dtype=complex)
for i in range(times.shape[0]):
evs[i] = exp_value(states[i], operator, times[i])
return evs | 4c18fa3b2ad7bec01f8f833ade59fe90315724ec | 16,277 |
from django import http
def bookmark(request):
"""
Add or remove a bookmark based on POST data.
"""
if request.method == 'POST':
# getting handler
model_name = request.POST.get('model', u'')
model = django_apps.get_model(*model_name.split('.'))
if model is None:
# invalid model -> bad request
return http.HttpResponseBadRequest(ERRORS['model'])
handler = handlers.library.get_handler(model)
if handler is None:
# bad or unregistered model -> bad request
return http.HttpResponseBadRequest(ERRORS['handler'])
# getting form
form = handler.get_form(request, data=request.POST)
if form.is_valid():
instance = form.instance()
bookmark_model = handler.backend.get_model()
# validating the bookmark key
key = handler.get_key(request, instance, form.cleaned_data['key'])
if not handler.allow_key(request, instance, key):
return http.HttpResponseBadRequest(ERRORS['key'])
# pre-save signal: receivers can stop the bookmark process
# note: one receiver is always called: *handler.pre_save*
# handler can disallow the vote
responses = signals.bookmark_pre_save.send(sender=bookmark_model,
form=form, request=request)
# if one of the receivers returns False then bookmark process
# must be killed
for receiver, response in responses:
if response is False:
return http.HttpResponseBadRequest(
u'Receiver %r killed the bookmark process' %
receiver.__name__)
# adding or removing the bookmark
bookmark = handler.save(request, form)
created = bool(bookmark.pk)
# post-save signal
# note: one receiver is always called: *handler.post_save*
signals.bookmark_post_save.send(sender=bookmark_model,
bookmark=bookmark, request=request, created=created)
# process completed successfully: redirect
return handler.response(request, bookmark, created)
# form is not valid: must handle errors
return handler.fail(request, form.errors)
# only answer POST requests
return http.HttpResponseForbidden('Forbidden.') | 32743894345e170d6d0efc427f3be0fb8d24b044 | 16,279 |