content stringlengths 35–762k | sha1 stringlengths 40–40 | id int64 0–3.66M |
---|---|---|
import re
from typing import Sequence
from urllib.parse import unquote
def resolve_pointer(document, pointer: str):
"""
Resolve a JSON pointer ``pointer`` within the referenced ``document``.
:param document: the referent document
:param str pointer: a json pointer URI fragment to resolve within it
"""
root = document
    # Only split at single forward slashes that are not preceded by a caret
parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
for part in parts:
# Restore escaped slashes and carets
replacements = {r"^/": r"/", r"^^": r"^"}
part = re.sub(
"|".join(re.escape(key) for key in replacements.keys()),
lambda k: replacements[k.group(0)],
part,
)
if isinstance(document, Sequence):
# Try to turn an array index to an int
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except KeyError as e:
raise KeyError(f"Pointer does not resolve to value: {pointer}") from e
if document is root:
# Prevents infinite recursion on same document
return document
else:
return replace(document, root) | c37323b19547f4cb8e20eb8c391d606e77c68b66 | 3,657,700 |
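A minimal, self-contained sketch of the caret-escaping convention used by resolve_pointer above; it is independent of the `replace` helper, which is assumed to come from the surrounding module.
import re
from urllib.parse import unquote

def _split_pointer(pointer: str):
    # Split on "/" only when it is not escaped as "^/", then unescape "^/" and "^^".
    parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
    return [re.sub(r"\^/|\^\^", lambda m: {"^/": "/", "^^": "^"}[m.group(0)], p)
            for p in parts]

# "a^/b" is a single key containing a slash; "0" then indexes into a list.
assert _split_pointer("/a^/b/0") == ["a/b", "0"]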
def load_config_file(config_file):
""" Loads the given file into a list of lines
:param config_file: file name of the config file
:type config_file: str
:return: config file as a list (one item per line) as returned by open().readlines()
"""
with open(config_file, 'r') as f:
config_document = f.readlines()
return config_document | 6a6e0199566e9ea27db309b2164f323cd5f57fdc | 3,657,701 |
def retrieve_analysis_report(accession, fields=None, file=None):
"""Retrieve analysis report from ENA
:param accession: accession id
:param fields: comma-separated list of fields to have in the report (accessible with get_returnable_fields with result=analysis)
:param file: filepath to save the content of the report
    :return: requested analysis report
"""
return retrieve_filereport(
accession=accession,
result="analysis",
fields=fields,
file=file) | 4adde7fea75c26f809d5efc1b5ff04b31ff9fa87 | 3,657,702 |
import code_hygiene
import sys
import os
def _CommonChecks(input_api, output_api):
"""Checks for both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, project_name='Native Client',
excluded_paths=tuple(EXCLUDE_PROJECT_CHECKS)))
# The commit queue assumes PRESUBMIT.py is standalone.
# TODO(bradnelson): Migrate code_hygiene to a common location so that
# it can be used by the commit queue.
old_sys_path = list(sys.path)
try:
sys.path.append(os.path.join(NACL_TOP_DIR, 'tools'))
sys.path.append(os.path.join(NACL_TOP_DIR, 'build'))
finally:
sys.path = old_sys_path
del old_sys_path
affected_files = input_api.AffectedFiles(include_deletes=False)
exclude_dirs = [ NACL_TOP_DIR + '/' + x for x in EXCLUDE_PROJECT_CHECKS ]
for filename in affected_files:
filename = filename.AbsoluteLocalPath()
if filename in exclude_dirs:
continue
if not IsFileInDirectories(filename, exclude_dirs):
errors, warnings = code_hygiene.CheckFile(filename, False)
for e in errors:
results.append(output_api.PresubmitError(e, items=errors[e]))
for w in warnings:
results.append(output_api.PresubmitPromptWarning(w, items=warnings[w]))
return results | 9bebe1909c70a053b8d16f5aa17958efb106ce52 | 3,657,703 |
import copy
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
from gensim.models.doc2vec import TaggedDocument
from sklearn.feature_extraction.text import TfidfVectorizer
def visualize_cluster_entropy(
doc2vec, eval_kmeans, om_df, data_cols, ks, cmap_name="brg"
):
    """Visualize entropy of embedding space partition. Currently only supports doc2vec embedding.
Parameters
----------
doc2vec : Doc2Vec model instance
Instance of gensim.models.doc2vec.Doc2Vec
eval_kmeans : callable
Callable cluster fit function
For instance,
.. code-block:: python
def eval_kmeans(X,k):
km = KMeans(n_clusters=k)
km.fit(X)
return km
om_df : DataFrame
A pandas dataframe containing O&M data, which contains columns specified in om_col_dict
data_cols : list
List of column names (str) which have text data.
ks : list
List of k parameters required for the clustering mechanic `eval_kmeans`
cmap_name :
Optional, color map
Returns
-------
Matplotlib figure instance
"""
df = om_df.copy()
cols = data_cols
fig = plt.figure(figsize=(6, 6))
cmap = plt.cm.get_cmap(cmap_name, len(cols) * 2)
for i, col in enumerate(cols):
X = df[col].tolist()
X = [x.lower() for x in X]
tokenized_data = [word_tokenize(x) for x in X]
doc2vec_data = [
TaggedDocument(words=x, tags=[str(i)]) for i, x in enumerate(tokenized_data)
]
model = copy.deepcopy(doc2vec)
model.build_vocab(doc2vec_data)
model.train(
doc2vec_data, total_examples=model.corpus_count, epochs=model.epochs
)
X_doc2vec = [model.infer_vector(tok_doc) for tok_doc in tokenized_data]
sse = []
clusters = []
for true_k in ks:
km = eval_kmeans(X_doc2vec, true_k)
sse.append(km.inertia_)
clusters.append(km.labels_)
plt.plot(
ks, sse, color=cmap(2 * i), marker="o", label=f"Doc2Vec + {col} entropy"
)
vectorizer = TfidfVectorizer()
X_tfidf = vectorizer.fit_transform(X)
sse = []
clusters = []
for true_k in ks:
km = eval_kmeans(X_tfidf, true_k)
sse.append(km.inertia_)
clusters.append(km.labels_)
plt.plot(
ks, sse, color=cmap(2 * i + 1), marker="o", label=f"TF-IDF + {col} entropy"
)
plt.xlabel(r"Number of clusters *k*")
plt.ylabel("Sum of squared distance")
plt.legend()
return fig | 5e91167fa23c09ada2f81d5e388e5d95a84fe283 | 3,657,704 |
def delete_student_meal_plan(
person_id: str = None,
academic_term_id: str = None):
"""
Removes a meal plan from a student.
:param person_id: The numeric ID of the person.
:param academic_term_id: The numeric ID of the academic term you're interested in.
:returns: String containing xml or an lxml element.
"""
return get_anonymous(
'deleteStudentMealPlan',
person_id=person_id,
academic_term_id=academic_term_id) | 75cd875b751b5af0ccbf71c090712f73b866f29b | 3,657,705 |
import torch
def colorize(x):
"""Converts a one-channel grayscale image to a color heatmap image. """
    if x.dim() == 2:
        # Promote a 2D HxW map to a 1xHxW tensor so the 3-dim branch below handles it
        x = torch.unsqueeze(x, 0)
if x.dim() == 3:
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
cl[1] = gauss(x, 1, .5, .3)
cl[2] = gauss(x, 1, .2, .3)
cl[cl.gt(1)] = 1
return cl
elif x.dim() == 4:
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:, 0, :, :] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
cl[:, 1, :, :] = gauss(x, 1, .5, .3)
cl[:, 2, :, :] = gauss(x, 1, .2, .3)
return cl | 0b5c95bc296fcdc3ed352d656984a3f46fa66fad | 3,657,706 |
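The `gauss` helper called by colorize is not part of this snippet; a plausible stand-in consistent with the gauss(x, amplitude, center, width) calls above (an assumption, not the original code) is:
import torch

def gauss(x, a, b, c):
    # Unnormalized Gaussian bump: amplitude a, center b, width c (assumed signature).
    return a * torch.exp(-((x - b) ** 2) / (2 * c ** 2))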
from typing import Counter
from itertools import chain, groupby
def local_role_density(
annotated_hypergraph, include_focus=False, absolute_values=False, as_array=False
):
"""
Calculates the density of each role within a 1-step neighbourhood
of a node, for all nodes.
Input:
annotated_hypergraph [AnnotatedHypergraph]: An annotated hypergraph.
include_focus [Bool]: If True, includes the roles of the focal node
            in the calculation.
absolute_values [Bool]: If True, returns role counts rather than densities.
as_array [Bool]: If True, return an array rather than a Counter.
Returns:
role_densities []: An array of dimension (# nodes x # roles)
describing the density of each role.
"""
A = annotated_hypergraph
def get_counts(group):
return Counter([x.role for x in group])
by_edge = {
eid: get_counts(v)
for eid, v in groupby(
sorted(A.IL, key=lambda x: x.eid, reverse=True), lambda x: x.eid
)
}
densities = {}
for incidence in A.IL:
densities[incidence.nid] = (
densities.get(incidence.nid, Counter()) + by_edge[incidence.eid]
)
if not include_focus:
densities[incidence.nid] = densities.get(
incidence.nid, Counter()
) - Counter([incidence.role])
keys = set(chain.from_iterable(densities.values()))
for item in densities.values():
item.update({key: 0 for key in keys if key not in item})
if not absolute_values:
normalise_counters(densities)
if as_array:
densities = to_matrix(densities, A)
return densities | 75ccc0e51ad627f3bd6f2f664bfaa93fd9e532d8 | 3,657,707 |
import re
import requests
def get(url: str) -> dict:
"""
    Return author, audioName and audios for a kuwo.cn song URL.
"""
data = {}
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Host": "www.kuwo.cn",
"Referer": "http://www.kuwo.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
}
song_info_url_format = "http://m.kuwo.cn/newh5/singles/songinfoandlrc?musicId={id}"
mp3_url_format = "http://www.kuwo.cn/url?format=mp3&rid={id}&response=url&type=convert_url3&br={quality}&from=web"
# http://www.kuwo.cn/play_detail/*********
id = re.findall(r"/(\d{1,})", url)
if id:
id = id[0]
else:
        data["msg"] = "Unsupported link format"
return data
session = requests.session()
    # Get the best available quality and the song info
with session.get(song_info_url_format.format(id=id), headers=headers, timeout=10) as rep:
if rep.status_code == 200 and rep.json().get("status") == 200:
best_quality = rep.json().get("data").get(
"songinfo").get("coopFormats")[0]
author = rep.json().get("data").get("songinfo").get("artist")
song_name = rep.json().get("data").get("songinfo").get("songName")
pic = rep.json().get("data").get("songinfo").get("pic")
data["author"] = author
data["audioName"] = song_name
data["imgs"] = [pic]
else:
            data["msg"] = "Failed to fetch song info"
return data
if not best_quality:
best_quality = "128kmp3"
    # Get the playback URL for the song
with session.get(mp3_url_format.format(id=id, quality=best_quality), headers=headers, timeout=10) as rep:
if rep.status_code == 200 and rep.json().get("code") == 200:
play_url = rep.json().get("url")
data["audios"] = [play_url]
else:
            data["msg"] = "Failed to fetch audio URL"
return data | 5dd97f4974b1fdc0a89ad36bbb14ad5c26e1582d | 3,657,708 |
import glob
import numpy as np
from os.path import join as opj
def get_subject_mask(subject, run=1, rois=[1030,2030], path=DATADIR,
space=MRISPACE,
parcellation=PARCELLATION):
"""
Get subject mask by run and ROI key to apply to a dataset
(rois are in DATADIR/PARCELLATION.tsv)
inputs:
subject - sid00[0-9]{4}
run - which run to use for parcellation (redundant?) [1-8]
rois - list of regions of interest for mask [1030,2030]
path - dir containing roi parcellations [DATADIR]
space - parcellation space [MRISPACE]
parcellation- file [PARCELLATION]
outputs:
mask_ds - pymvpa Dataset containing mask data {0,[rois]}
"""
fname = opj(path, 'sub-%s'%subject, 'func', 'sub-%s_task-*_run-%02d_space-%s_%s.nii.gz'%(subject, run, space, parcellation))
#print fname
fname = glob.glob(fname)[0]
ds=P.fmri_dataset(fname)
found = np.where(np.isin(ds.samples,rois))[1]
return ds[:,found] | 2838c8067dff1878af2b7f874cffa3cd4fc5adf1 | 3,657,709 |
def social_auth(user):
"""
Return True if specified user has logged in with local account, False if user
uses 3rd party account for sign-in.
"""
    return user.password != settings.SOCIAL_AUTH_USER_PASSWORD | 197fccbe875de1e4ea08a28a2f7927160d9dae6e | 3,657,710 |
def update_post(post_id):
"""
    The route used to update a post. It displays the create_post.html page with the original post's contents filled in,
and allows the user to change anything about the post. When the post has been successfully updated it redirects to
the post route.
"""
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post', form=form, legend='Update Post') | 2761f2fdc9ad50f96c36e8ee13f87f67275b360e | 3,657,711 |
import tensorflow as tf
def lrelu(x, leak=0.2, scope="lrelu"):
"""
leaky relu
if x > 0: return x
else: return leak * x
:param x: tensor
:param leak: float, leak factor alpha >= 0
:param scope: str, name of the operation
:return: tensor, leaky relu operation
"""
with tf.variable_scope(scope):
# if leak < 1:
# return tf.maximum(x, leak * x)
# elif x > 0:
# return x
# else:
# return leak * x
return tf.nn.relu(x) - leak * tf.nn.relu(-x) | 25757c70ae7d37b568b16ea9d489a5d15b68042f | 3,657,712 |
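A quick numpy check (a sketch, independent of TensorFlow) that the identity used above, relu(x) - leak * relu(-x), matches the piecewise leaky-ReLU definition max(x, leak * x) for 0 <= leak <= 1:
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
leak = 0.2
relu = lambda v: np.maximum(v, 0.0)
assert np.allclose(relu(x) - leak * relu(-x), np.maximum(x, leak * x))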
from typing import Callable
from typing import Optional
from functools import wraps
def enable_async(func: Callable) -> Callable:
"""
Overview:
Empower the function with async ability.
Arguments:
- func (:obj:`Callable`): The original function.
Returns:
- runtime_handler (:obj:`Callable`): The wrap function.
"""
@wraps(func)
def runtime_handler(task: "Task", *args, async_mode: Optional[bool] = None, **kwargs) -> "Task":
"""
Overview:
If task's async mode is enabled, execute the step in current loop executor asyncly,
or execute the task sync.
Arguments:
- task (:obj:`Task`): The task instance.
- async_mode (:obj:`Optional[bool]`): Whether using async mode.
Returns:
- result (:obj:`Union[Any, Awaitable]`): The result or future object of middleware.
"""
if async_mode is None:
async_mode = task.async_mode
if async_mode:
assert not kwargs, "Should not use kwargs in async_mode, use position parameters, kwargs: {}".format(kwargs)
t = task._async_loop.run_in_executor(task._thread_pool, func, task, *args, **kwargs)
task._async_stack.append(t)
return task
else:
return func(task, *args, **kwargs)
return runtime_handler | 82b44b7e3bc3b59e6d0a12aca7983f3c2b0f6468 | 3,657,713 |
from io import StringIO
import pandas as pd
from google.cloud import storage
def get_past_data_from_bucket_as_dataframe():
"""Read a blob"""
bucket_name = "deep_learning_model_bucket"
blob_name = "past_data.csv"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
return_data = blob.download_as_text()
return_data = StringIO(return_data)
df = pd.read_csv(return_data, sep=",", header=0, index_col=False)
return df | 74ca2fddbd94efc51652974cc2e62a699e92ea80 | 3,657,714 |
def align_junction_LeftRight(viral_seq, bp_pos, ri_pos, align_to="L"):
"""If align_to="L", put all the ambiguous nucleotides in the
'body' part of the junction defined by bp_pos and ri_pos,
that is the junction point is moved as close to the 5'
end (of viral_seq) as possible. If align_to="R", do the opposite."""
py_ri = ri_pos-1 # in this way viral_seq[py_ri] is the first nucleotide after the junction obtained by DI-tector
py_bp = bp_pos-1 # in this way viral_seq[py_bp] is the last nucleotide before the junction obtained by DI-tector
    assert (align_to == "L" or align_to == "R"), "Please enter R or L to align as right as possible or as left as possible."
new_bp_pos = py_bp
new_ri_pos = py_ri
try_next_alignement = True
while try_next_alignement:
if align_to == "L":
            if viral_seq[new_bp_pos] == viral_seq[new_ri_pos-1]:
new_bp_pos -= 1
new_ri_pos -= 1
else:
try_next_alignement = False
elif align_to == "R":
            if viral_seq[new_bp_pos+1] == viral_seq[new_ri_pos]:
new_bp_pos += 1
new_ri_pos += 1
else:
try_next_alignement = False
new_bp_pos += 1 # in this way I am using a fixed convention
new_ri_pos += 1 # in this way I am using a fixed convention
return new_bp_pos, new_ri_pos | 92634e4ff6ca6600fb91046202c240d87aac1135 | 3,657,715 |
def initialize_sqlite_tables_if_not_initialized() -> bool:
"""
Initialize the sqlite tables if they have not been
initialized yet.
Returns
-------
initialized : bool
If initialized, returns True.
"""
table_exists: bool = _table_exists(
table_name=TableName.EXPRESSION_NORMAL)
if table_exists:
return False
_create_expression_normal_table()
_create_expression_handler_table()
_create_indent_num_normal_table()
_create_indent_num_handler_table()
_create_last_scope_table()
_create_event_handler_scope_count_table()
_create_loop_count_table()
_create_debug_mode_setting_table()
_create_debug_mode_callable_count_table()
_create_stage_elem_id_table()
_create_variable_name_count_table()
_create_handler_calling_stack_table()
_create_circular_calling_handler_name_table()
_create_stage_id_table()
return True | 9392b404d9023356902efaed8837cdd2d32cdd99 | 3,657,716 |
from math import sqrt
def poisson2vpvs(poisson_ratio):
"""
Convert Poisson's ratio to Vp/Vs ratio.
Parameters
----------
poisson_ratio : float
Poisson's ratio.
Returns
-------
vpvs_ratio : float
Vp/Vs ratio.
"""
return sqrt(2 * (poisson_ratio - 1) / (2 * poisson_ratio - 1)) | 9f0f41ec3dd5be539dca341ed2ec4b091aeee730 | 3,657,717 |
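A quick sanity check of the formula above, assuming the function just defined is in scope: for a Poisson solid (nu = 0.25) the Vp/Vs ratio is sqrt(3).
from math import isclose, sqrt

# sqrt(2*(0.25 - 1)/(2*0.25 - 1)) == sqrt(3) ~= 1.732
assert isclose(poisson2vpvs(0.25), sqrt(3))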
import numpy as np
def vs30_to_z1pt0_cy14(vs30, japan=False):
"""
Returns the estimate depth to the 1.0 km/s velocity layer based on Vs30
from Chiou & Youngs (2014) California model
:param numpy.ndarray vs30:
Input Vs30 values in m/s
:param bool japan:
If true returns the Japan model, otherwise the California model
:returns:
Z1.0 in m
"""
if japan:
c1 = 412. ** 2.
c2 = 1360.0 ** 2.
return np.exp((-5.23 / 2.0) *
np.log((np.power(vs30,2.) + c1) / (c2 + c1)))
else:
c1 = 571 ** 4.
c2 = 1360.0 ** 4.
return np.exp((-7.15 / 4.0) * np.log((vs30 ** 4. + c1) / (c2 + c1))) | f4f985c2e0aa533cac8afd8533bfad126edbaf01 | 3,657,718 |
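A small usage sketch for the function above, comparing the California and Japan models at the same Vs30 values (numpy assumed; the depths are whatever the hard-coded coefficients give):
import numpy as np

vs30 = np.array([300.0, 760.0])
print(vs30_to_z1pt0_cy14(vs30))              # California model depths (m)
print(vs30_to_z1pt0_cy14(vs30, japan=True))  # Japan model depths (m)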
from zope.interface import implementer
def empowerment(iface, priority=0):
"""
Class decorator for indicating a powerup's powerup interfaces.
The class will also be declared as implementing the interface.
@type iface: L{zope.interface.Interface}
@param iface: The powerup interface.
@type priority: int
@param priority: The priority the powerup will be installed at.
"""
def _deco(cls):
cls.powerupInterfaces = (
tuple(getattr(cls, 'powerupInterfaces', ())) +
((iface, priority),))
implementer(iface)(cls)
return cls
return _deco | 2f63892e539928951ca462239cfd014bdd8b0409 | 3,657,719 |
import numpy as np
def multigauss_and_bgd_jacobian(x, *params):
    """Jacobian of the multiple Gaussian profile plus a polynomial background to data.
The degree of the polynomial background is fixed by parameters.CALIB_BGD_NPARAMS.
    The order of the parameters is a first block of CALIB_BGD_NPARAMS parameters (from low to high Legendre polynomial degree,
contrary to np.polyval), and then block of 3 parameters for the Gaussian profiles like amplitude, mean and standard
deviation. x values are renormalised on the [-1, 1] interval for the background.
Parameters
----------
x: array
The x data values.
*params: list of float parameters as described above.
Returns
-------
y: array
The jacobian values.
Examples
--------
>>> import spectractor.parameters as parameters
>>> parameters.CALIB_BGD_NPARAMS = 4
>>> x = np.arange(600.,800.,1)
>>> p = [20, 1, -1, -1, 20, 650, 3, 40, 750, 5]
>>> y = multigauss_and_bgd_jacobian(x, *p)
>>> assert(np.all(np.isclose(y.T[0],np.ones_like(x))))
>>> print(y.shape)
(200, 10)
"""
bgd_nparams = parameters.CALIB_BGD_NPARAMS
out = []
x_norm = rescale_x_for_legendre(x)
for k in range(bgd_nparams):
# out.append(params[k]*(parameters.CALIB_BGD_ORDER-k)*x**(parameters.CALIB_BGD_ORDER-(k+1)))
# out.append(x ** (bgd_nparams - 1 - k))
c = np.zeros(bgd_nparams)
c[k] = 1
out.append(np.polynomial.legendre.legval(x_norm, c))
for k in range((len(params) - bgd_nparams) // 3):
jac = gauss_jacobian(x, *params[bgd_nparams + 3 * k:bgd_nparams + 3 * k + 3]).T
for j in jac:
out.append(list(j))
return np.array(out).T | e4347f50da3bd94ec4aac49c5af3ead4e08fed3b | 3,657,720 |
def get_empty_config():
"""
Return an empty Config object with no options set.
"""
empty_color_config = get_empty_color_config()
result = Config(
examples_dir=None,
custom_dir=None,
color_config=empty_color_config,
use_color=None,
pager_cmd=None,
editor_cmd=None,
squeeze=None,
subs=None
)
return result | f744d770822bd6d58777d346674ea6c16b665701 | 3,657,721 |
def parse(sql_string):
"""Given a string containing SQL, parse it and return the normalized result."""
parsed = select_stmt.parseString(sql_string)
parsed.from_clause = _normalize_from_clause(parsed.from_clause)
parsed.where_clause = _normalize_where_clause(parsed.where_clause)
return parsed | 96a369262bfd852ab7da484c86a2cd06b121d4be | 3,657,722 |
from pathlib import Path
def check_overwrite(path: str, overwrite: bool = False) -> str:
"""
Check if a path exists, if so raising a RuntimeError if overwriting is disabled.
:param path: Path
:param overwrite: Whether to overwrite
:return: Path
"""
if Path(path).is_file() and not overwrite:
raise RuntimeError(
f"Requested existing {path!r} as output, but overwriting is disabled."
)
return path | 961affdcc87b055cdd5acb9a28547ef87ae426b9 | 3,657,723 |
import struct
def bytes_to_text(input):
"""Converts given bytes (latin-1 char + padding)*length to text"""
content = struct.unpack((int(len(input)/2))*"sx", input)
return "".join([x.decode("latin-1") for x in content]).rstrip("\x00") | f058847886fc3a488c54b8e01c3d7506f6d76510 | 3,657,724 |
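A tiny example for bytes_to_text above (assuming the function is in scope): latin-1 characters interleaved with zero-byte padding decode back to plain text.
# Each character is stored as one latin-1 byte followed by a padding byte.
assert bytes_to_text(b"A\x00B\x00C\x00") == "ABC"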
import tensorflow as tf
def flatten_conv_capsule(inputs):
"""
:param inputs is output from a convolutional capsule layer
inputs.shape = [N,OH,OW,C,PH] C is channel number, PH is vector length
:return shape = [N,OH*OW*C,PH]
"""
inputs_shape = inputs.shape
l=[]
for i1 in range(inputs_shape[1]):
for i2 in range(inputs_shape[2]):
for i3 in range(inputs_shape[3]):
l.append(inputs[:,i1,i2,i3,:])
out = tf.stack(l,axis=1)
return out | 8be5319c85311122ef728af5266dd9aecf549be0 | 3,657,725 |
def LookupValue(values, name, scope, kind):
"""Like LookupKind, but for constant values."""
# If the type is an enum, the value can be specified as a qualified name, in
# which case the form EnumName.ENUM_VALUE must be used. We use the presence
# of a '.' in the requested name to identify this. Otherwise, we prepend the
# enum name.
if isinstance(kind, mojom.Enum) and '.' not in name:
name = '%s.%s' % (kind.spec.split(':', 1)[1], name)
for i in reversed(xrange(len(scope) + 1)):
test_spec = '.'.join(scope[:i])
if test_spec:
test_spec += '.'
test_spec += name
value = values.get(test_spec)
if value:
return value
return values.get(name) | 211a4b161c74971d59d256db28935e33005f1782 | 3,657,726 |
def SI1452(key,
Aleph=u'\u05d0', Tav=u'\u05ea'):
"""
Minimalist caps action
Make sure latin capital letters are produced in keys carrying them
(additionally, make Hebrew-letter keys go to level 2)
"""
if Aleph<=key.level_chars[1]<=Tav or u'A' <=key.level_chars[2]<=u'Z':
return CapsByShift()
else:
return None | 51c14d32d090d25e5625582e32d8bbd45d9a819b | 3,657,727 |
def new_topic(request):
    """Add a new topic."""
    if request.method != 'POST':
        form = TopicForm()  # Not a POST request: first visit, so return an empty form
    else:
        # Data was submitted via POST; process it
        form = TopicForm(request.POST)  # Build a form object from the submitted data
        # is_valid() verifies that all required fields are filled in (form fields are
        # required by default) and that the input matches the expected field types
        if form.is_valid():  # Check that the form data is valid
            form.save()  # Save the data to the database
            # reverse() builds the URL of the topics page; HttpResponseRedirect redirects the browser there
            return HttpResponseRedirect(reverse('learning_logs:topics'))
    context = {'form': form}  # Passed to the template for rendering
    return render(request, 'learning_logs/new_topic.html', context) | b4e6524f0c8d4fdc6d88bd879f61c7430112ae9f | 3,657,728 |
import subprocess
import sys
def pkg_config(*packages, **kw):
"""Translate pkg-config data to compatible Extension parameters.
Example usage:
>>> from distutils.extension import Extension
>>> from pkgdist import pkg_config
>>>
>>> ext_kwargs = dict(
... include_dirs=['include'],
... extra_compile_args=['-std=c++11'],
... )
>>> extensions = [
... Extension('foo', ['foo.c']),
... Extension('bar', ['bar.c'], **pkg_config('lcms2')),
... Extension('ext', ['ext.cpp'], **pkg_config(('nss', 'libusb-1.0'), **ext_kwargs)),
... ]
"""
flag_map = {
'-I': 'include_dirs',
'-L': 'library_dirs',
'-l': 'libraries',
}
try:
tokens = subprocess.check_output(
['pkg-config', '--libs', '--cflags'] + list(packages)).split()
except OSError as e:
sys.stderr.write(f'running pkg-config failed: {e.strerror}\n')
sys.exit(1)
for token in tokens:
token = token.decode()
if token[:2] in flag_map:
kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
else:
kw.setdefault('extra_compile_args', []).append(token)
return kw | 1a3471ed181f135b401c09fc8e798889451923fe | 3,657,729 |
from pathlib import Path
async def get_xdg_data_dir(app=None):
"""Return a data directory for this app.
Create the directory if it does not exist.
"""
if app is None:
app = await get_package_name()
data_home = Path(await get_xdg_home('XDG_DATA_HOME'))
data_dir = data_home / app
    if not data_dir.exists():
        data_dir.mkdir()
    return data_dir.resolve() | fe73abc5e91bf2c3c4a1dfd745981b1e0733aa5a | 3,657,730 |
def patch_subscription(subscription, data):
""" Patches the given subscription with the data provided
"""
return stage_based_messaging_client.update_subscription(
subscription["id"], data) | 68575f24a0e0a881b8c3a9572a496b2ba6293be2 | 3,657,731 |
def update_game(game_obj, size, center1, center2):
"""
Update game state
"""
new_game_obj = game_obj.copy()
if center1 is not None:
new_game_obj['rudder1_pos'] = center1
if center2 is not None:
new_game_obj['rudder2_pos'] = center2
# Check if hitting corner
init_vel = new_game_obj['velocity']
if new_game_obj['pos'][1] >= 480-15 or new_game_obj['pos'][1] <= 15:
new_game_obj['velocity'] = (init_vel[0], -1*init_vel[1])
if new_game_obj['pos'][0] >= 640-15:
new_game_obj['pos'] = (size[1]/2, size[0]/2)
new_game_obj['velocity'] = (-1.05*abs(new_game_obj['velocity'][0]),
1.05*abs(new_game_obj['velocity'][1]))
new_game_obj['score1'] += 1
elif new_game_obj['pos'][0] <= 15:
new_game_obj['pos'] = (size[1]/2, size[0]/2)
new_game_obj['score2'] += 1
new_game_obj['velocity'] = (1.05*abs(new_game_obj['velocity'][0]),
-1.05*abs(new_game_obj['velocity'][1]))
elif 0 <= new_game_obj['pos'][0]-new_game_obj['rudder1_pos'][0] <= 17 and new_game_obj['rudder1_pos'][1]-(50+15) < new_game_obj['pos'][1] < new_game_obj['rudder1_pos'][1] + 50+15:
new_game_obj['velocity'] = (-1*init_vel[0], init_vel[1])
elif 0 <= new_game_obj['rudder2_pos'][0] - new_game_obj['pos'][0] <= 17 and new_game_obj['rudder2_pos'][1]-(50+15) < new_game_obj['pos'][1] < new_game_obj['rudder2_pos'][1]+(50+15):
init_vel = new_game_obj['velocity']
new_game_obj['velocity'] = (-1*init_vel[0], init_vel[1])
new_game_obj['pos'] = (new_game_obj['pos'][0] + new_game_obj['velocity']
[0], new_game_obj['pos'][1] + new_game_obj['velocity'][1])
# print(new_game_obj)
return new_game_obj | 33646593e6743d11174f72be6f4b825633fe8782 | 3,657,732 |
def get_background_pools(experiment: Experiment) -> ThreadPoolExecutor:
"""
    Create thread pools for background activities and rollbacks. Each pool is as
    big as the number of declared background items; if none are declared, `None`
    is returned for that pool.
"""
method = experiment.get("method")
rollbacks = experiment.get("rollbacks", [])
activity_background_count = 0
for activity in method:
if activity and activity.get("background"):
activity_background_count = activity_background_count + 1
activity_pool = None
if activity_background_count:
logger.debug(
"{c} activities will be run in the background".format(
c=activity_background_count))
activity_pool = ThreadPoolExecutor(activity_background_count)
rollback_background_pool = 0
for activity in rollbacks:
if activity and activity.get("background"):
rollback_background_pool = rollback_background_pool + 1
rollback_pool = None
if rollback_background_pool:
logger.debug(
"{c} rollbacks will be run in the background".format(
c=rollback_background_pool))
rollback_pool = ThreadPoolExecutor(rollback_background_pool)
return activity_pool, rollback_pool | 1a5fe86a84127ee56ae0e8cb62081f0b2f9af45d | 3,657,733 |
import requests
from tempfile import NamedTemporaryFile
def download_thumb(se: requests.Session, proxy: dict, addr: str) -> str:
    """Download a thumbnail image.
    Args:
        se: requests session object
        proxy: proxy dict
        addr: thumbnail URL
    Returns:
        The absolute local path of the downloaded thumbnail on success, or an empty string on failure.
    """
header = {'User-Agent': USER_AGENT}
try:
with se.get(addr,
headers=header,
proxies=proxy,
stream=True,
timeout=5) as thumb_res:
with NamedTemporaryFile('w+b', prefix='PETSpider_', delete=False) as thumb:
for chunk in thumb_res.iter_content():
thumb.write(chunk)
path = thumb.name
except (OSError, IOError):
return ''
else:
return path | 06971178c80376bb9c7bf30ca6cc8ad8c2a4b6b9 | 3,657,734 |
def iter_dir_completions(arg):
"""Generate an iterator that iterates through directory name completions.
:param arg: The directory name fragment to match
:type arg: str
"""
return iter_file_completions(arg, True) | e29544ea7886b08c03d6ba79374d0576b5af701e | 3,657,735 |
def climate_eurotronic_spirit_z_fixture(client, climate_eurotronic_spirit_z_state):
"""Mock a climate radio danfoss LC-13 node."""
node = Node(client, climate_eurotronic_spirit_z_state)
client.driver.controller.nodes[node.node_id] = node
return node | 7d24ada11d4ed30dd62d7bf160757f8909f7d4c8 | 3,657,736 |
import numpy as np
def xi_eta_to_ab(ξ, η):
""" function to transform xi, eta coords to a, b
see Hesthaven function 'rstoab'
@param xi, eta vectors of xi, eta pts
"""
a, b = np.zeros_like(ξ), np.zeros_like(η)
singular = np.isclose(η, 1.0)
nonsingular = np.logical_not(singular)
a[nonsingular] = 2*(1. + ξ[nonsingular])/(1 - η[nonsingular]) - 1
a[singular] = -1
b = η
return a, b | 3664e90754ab9ebb86cc76f585e8383d76d6d75d | 3,657,737 |
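A small usage sketch for the mapping above (numpy assumed, function in scope): the edge xi = -1 maps to a = -1, and the singular vertex eta = 1 is forced to a = -1 as well.
import numpy as np

a, b = xi_eta_to_ab(np.array([-1.0, 0.0]), np.array([0.0, 1.0]))
print(a, b)  # a == [-1, -1] (second entry via the singular eta == 1 branch), b == eta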
from typing import List
import time
from datetime import datetime
import os
def generate_run_base_dir(
result_dir: str, timestamp: int = None, tag: str = None, sub_dirs: List[str] = None
) -> str:
"""
Generate a base directory for each experiment run.
Looks like this: result_dir/date_tag/sub_dir_1/.../sub_dir_n
Args:
result_dir (str): Experiment output directory.
        timestamp (int): Timestamp which will be included in the form '%y-%m-%d_%H:%M'.
tag (str): Tag after timestamp.
sub_dirs (List[str]): List of subdirectories that should be created.
Returns:
str: Directory name.
"""
if timestamp is None:
timestamp = time.time()
if sub_dirs is None:
sub_dirs = []
# Convert time
    date = datetime.fromtimestamp(timestamp)
date_str = date.strftime("%y-%m-%d_%H:%M")
# Append tag if given
if tag is None:
base_dir = date_str
else:
base_dir = date_str + "_" + tag
# Create directory
base_dir = os.path.join(result_dir, base_dir, *sub_dirs)
return base_dir | fffdd1e44b63dec9df108f0bc22a83944805ec02 | 3,657,738 |
def mulaw_to_value(mudata):
"""Convert a mu-law encoded value to linear."""
position = ((mudata & 0xF0) >> 4) + 5
return ((1 << position) | ((mudata & 0xF) << (position - 4)) | (1 << (position - 5))) - 33 | 2ccca7f13861c7a212ac3a1dd2afc439839b19a7 | 3,657,739 |
from typing import Iterable
from typing import Set
import re
import click
def init_exclusion_regexes(paths_ignore: Iterable[str]) -> Set[re.Pattern]:
"""
    Build a set of compiled exclusion regexes from the ignored
    entries, which come from 3 sources:
    .gitguardian.yaml
    files in .git
    files ignored in .gitignore
"""
res = set()
for path in paths_ignore:
if not is_pattern_valid(path):
raise click.ClickException(f"{path} is not a valid exclude pattern.")
res.add(re.compile(translate_user_pattern(path)))
return res | 54c7dd9b064fd2582545b82b2f8df324448e49f3 | 3,657,740 |
def validate_watch(value):
"""Validate "watch" parameter."""
if not value:
return None
if isinstance(value, str):
value = [_ for _ in value.split("\n") if _]
return value | 203b77f376a747cbd10f0c674897f912bb75618f | 3,657,741 |
def diatomic_unitary(a, b, c):
"""
Unitary decomposed as a diatomic gate of the form
Ztheta + X90 + Ztheta + X90 + Ztheta
"""
X90 = expm(-0.25j*np.pi*pX)
return expm(-0.5j*a*pZ)@X90@expm(-0.5j*b*pZ)@X90@expm(-0.5j*c*pZ) | 38bbd194f2179aff1874190dbbb95546790f9d91 | 3,657,742 |
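The names expm, pX and pZ come from the surrounding module; a minimal set of definitions consistent with the calls above (an assumption, not the original source), plus a unitarity check:
import numpy as np
from scipy.linalg import expm

pX = np.array([[0, 1], [1, 0]], dtype=complex)   # Pauli X (assumed)
pZ = np.array([[1, 0], [0, -1]], dtype=complex)  # Pauli Z (assumed)

U = diatomic_unitary(0.1, 0.2, 0.3)
print(np.allclose(U @ U.conj().T, np.eye(2)))  # True: a product of unitaries is unitary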
def is_x_y_in_hidden_zone_all_targets(room_representation, camera_id, x, y):
"""
:description
    Extend the function is_x_y_in_hidden_zone_one_target to check
    every target in the room
:param
1. (RoomRepresentation) -- room description of the target and the cameras
2. (int) camera_id -- camera id to find it in the given room description
1. (int) x -- x coordinate of a point in the room frame
2. (int) y -- y coordinate of a point in the room frame
:return / modify vector
1. (bool) -- True if the point is not hidden
"""
camera = find_cam_in_camera_representation(room_representation, camera_id)
if camera is None:
return False
for target in room_representation.target_representation_list:
xt = target.xc
yt = target.yc
radius = target.radius
if is_x_y_in_hidden_zone_one_target(camera, x, y, xt, yt, radius):
return True
return False | a682a2ac99798c3c8d7ad23881f965ffa33071bf | 3,657,743 |
def _create_pure_mcts_player(
game: polygames.Game, mcts_option: mcts.MctsOption, num_actor: int
) -> mcts.MctsPlayer:
"""a player that uses only mcts + random rollout, no neural net"""
player = mcts.MctsPlayer(mcts_option)
for _ in range(num_actor):
actor = polygames.Actor(
None, game.get_feat_size(), game.get_action_size(), False, False, None
)
player.add_actor(actor)
return player | 8faf70a43e0734362dfd3aba0c0575758945eb09 | 3,657,744 |
def get_fans_users():
"""
获取用户的粉丝
:return:
"""
user_id = request.argget.all("user_id")
page = str_to_num(request.argget.all("page", 1))
pre = str_to_num(request.argget.all("pre", 20))
s, r = arg_verify(reqargs=[("user id", user_id)], required=True)
if not s:
return r
data = {"users": []}
fans = mdbs["user"].db.user_follow.find({"type": "account", "follow": user_id})
data_cnt = fans.count(True)
for user in fans.skip(pre * (page - 1)).limit(pre):
s, r = get_user_public_info(user_id=user["user_id"],
is_basic=False,
current_user_isauth=current_user.is_authenticated)
if s:
data["users"].append(r)
data["users"] = datas_paging(
pre=pre,
page_num=page,
data_cnt=data_cnt,
datas=data["users"])
return data | 0af36e4f2b3051975f834df554ff09e9d539ec82 | 3,657,745 |
import re
def test_invalid_patterns(list, pattern):
"""
Function to facilitate the tests in MyRegExTest class
:param list: list with strings of invalid cases
:param pattern: a regular expression
:return: list with the result of all matches which should be a list of None
"""
newList = []
for item in list:
matched = re.match(pattern, item)
if matched is None:
newList.append(None)
else:
raise ValueError(item + ' matched to ' + pattern + ' while it should not have matched')
return newList | 94a8232d66ff4c705e7a587aedc9d1cbe0b4f072 | 3,657,746 |
import argparse
def args_parse():
"""Parse the input args."""
parser = argparse.ArgumentParser(description='Certificate import')
parser.add_argument("--cert", default="./kmc/config/crt/sever.cert", type=str,
help="The path of certificate file")
parser.add_argument("--key", default='./kmc/config/crt/sever.key', type=str,
help="The path of private Key file.")
parser.add_argument("--key_component_1", default='./kmc/config/ksf/ksmaster.dat', type=str,
help="key material 1.")
parser.add_argument("--key_component_2", default='./kmc/config/ksf/ksstandby.dat', type=str,
help="key material 2.")
args = parser.parse_args()
return args | 88b114ac63afe32bac097a26bb15fe704fc2e8c1 | 3,657,747 |
def remove_constant_features(sfm):
"""
Remove features that are constant across all samples
"""
# boolean matrix of whether x == first column (feature)
x_not_equal_to_1st_row = sfm._x != sfm._x[0]
non_const_f_bool_ind = x_not_equal_to_1st_row.sum(axis=0) >= 1
return sfm.ind_x(selected_f_inds=non_const_f_bool_ind) | ae8c6e1d14b7260c8d2491b2f8a00ba352d7375a | 3,657,748 |
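A sketch of the constant-feature test above on a plain numpy matrix (the `sfm` wrapper is assumed to expose its data matrix as `_x` and a column-selection method `ind_x`):
import numpy as np

x = np.array([[1, 5, 3],
              [1, 6, 3],
              [1, 7, 3]])
# A column is non-constant if at least one row differs from the first row.
non_const = (x != x[0]).sum(axis=0) >= 1
print(non_const)  # [False  True False] -> only the middle column varies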
import numpy as np
def flatten(x):
""" Flatten list an array.
Parameters
----------
x: list of ndarray or ndarray
the input dataset.
Returns
-------
y: ndarray 1D
the flatten input list of array.
    shape: list of tuples
the input list of array structure.
"""
# Check input
if not isinstance(x, list):
x = [x]
elif len(x) == 0:
return None, None
# Flatten the dataset
y = x[0].flatten()
shape = [x[0].shape]
for data in x[1:]:
y = np.concatenate((y, data.flatten()))
shape.append(data.shape)
return y, shape | 4efb7c740cb197bf9e8e094a9e6b6e38badb5a25 | 3,657,749 |
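A short round-trip sketch for flatten above (numpy assumed, function in scope):
import numpy as np

y, shapes = flatten([np.zeros((2, 3)), np.ones(4)])
print(y.shape, shapes)  # (10,) [(2, 3), (4,)]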
def sample_automaton():
"""
Creates a sample automaton and returns it.
"""
# The states are a python Set. Name them whatever you want.
states = {"0","1","2"}
# Choose one of the states to be the initial state. You need to give this a Set, but that Set usually only contains one state.
init_state = {"0"}
# The set of accepted states should also be a subset of the states.
accept_states = {"0","1"}
# The automaton works based on a set alphabet.
alphabet = {"a","b"}
#The transition diagram for the automaton is a set of edges. Each edge q1 --x--> q2 is represented by a tuple (not a list!) (q1, x, q2).
# The constructor will accept the actual set, like below, or you can pass it a
# simplified string that shows the edges. So either of the two lines below works.
d = { ("0","a","1"), ("0","b","2"), ("1","a","2"), ("2","b","0") }
d = "0a1,0b2,2b0"
#create automaton
usr_auto = Automaton(states, init_state, accept_states, alphabet, d)
return usr_auto | f0b7da88825c88841e55dd1ebaf46d5979eaa0fb | 3,657,750 |
import numpy as np
def mae(data, data_truth):
"""Computes mean absolute error (MAE)
:param data: Predicted time series values (n_timesteps, n_timeseries)
:type data: numpy array
:param data_truth: Ground truth time series values
:type data_truth: numpy array
"""
return np.mean(np.abs(data - data_truth)) | 76fce7e40adbfbf28f3d08df4117502baa01cbed | 3,657,751 |
def _find_ntc_family(guide_id):
"""Return a String of the NTC family
"""
guide_id_list = guide_id.split('_')
return '_'.join(guide_id_list[0:2]) | 2b340694c2379682b232e49c9b0f1f0a91c778cf | 3,657,752 |
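Example for the helper above (the guide ID here is made up): the family is simply the first two underscore-separated tokens.
assert _find_ntc_family("NTC_0001_guideA") == "NTC_0001"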
def CreateFilletCurves(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance, multiple=False):
"""
Creates a tangent arc between two curves and trims or extends the curves to the arc.
Args:
curve0 (Curve): The first curve to fillet.
point0 (Point3d): A point on the first curve that is near the end where the fillet will
be created.
curve1 (Curve): The second curve to fillet.
point1 (Point3d): A point on the second curve that is near the end where the fillet will
be created.
radius (double): The radius of the fillet.
join (bool): Join the output curves.
trim (bool): Trim copies of the input curves to the output fillet curve.
arcExtension (bool): Applies when arcs are filleted but need to be extended to meet the
fillet curve or chamfer line. If true, then the arc is extended
maintaining its validity. If false, then the arc is extended with a
line segment, which is joined to the arc converting it to a polycurve.
tolerance (double): The tolerance, generally the document's absolute tolerance.
Returns:
Curve[]: The results of the fillet operation. The number of output curves depends
on the input curves and the values of the parameters that were used
during the fillet operation. In most cases, the output array will contain
either one or three curves, although two curves can be returned if the
radius is zero and join = false.
For example, if both join and trim = true, then the output curve
will be a polycurve containing the fillet curve joined with trimmed copies
of the input curves. If join = False and trim = true, then three curves,
the fillet curve and trimmed copies of the input curves, will be returned.
If both join and trim = false, then just the fillet curve is returned.
"""
url = "rhino/geometry/curve/createfilletcurves-curve_point3d_curve_point3d_double_bool_bool_bool_double_double"
if multiple: url += "?multiple=true"
args = [curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance]
if multiple: args = list(zip(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | fee4c009db83ff77d6369e8e88d2bd3bc1d64390 | 3,657,753 |
from typing import List
from typing import Tuple
import json
from typing import Dict
def select_ads() -> jsonify:
"""
select ads
"""
try:
if INSERTED_FILES_NUM == 0 or INSERTED_FILES_NUM != PROCESSED_FILES_NUM:
raise Exception('server is not ready')
weights: List[Tuple[str, int]] = json.loads(request.get_json())
selected_ads: List[Dict[str, int or float]] = server_algo.select_ads(weights)
return jsonify({'ads': selected_ads}), 200
except Exception as e:
return jsonify(str(e)), 400 | 7e65b21792c6b80e90dcb16bab644a329e8f7eb5 | 3,657,754 |
from typing import Dict
from typing import List
from typing import Any
def _xpath_find(data: Dict, xparts: List, create_if_missing: bool = False) -> Any:
"""
Descend into a data dictionary.
:arg data:
The dictionary where to look for `xparts`.
:arg xparts:
Elements of an Xpath split with xpath_split()
:arg bool create_if_missing:
If elements are missing from `data`, create them.
:returns:
The element identified by `xparts`.
:raises KeyError:
If create_if_missing=False and the element is not found in `data`.
:raises TypeError:
If `data` does not match the expected structure conveyed by `xparts`.
"""
for _, name, keys in xparts:
if not isinstance(data, dict):
raise TypeError("expected a dict")
if keys:
if name not in data and create_if_missing:
data[name] = KeyedList(key_name=_xpath_keys_to_key_name(keys))
lst = data[name] # may raise KeyError
if isinstance(lst, KeyedList):
try:
data = lst[_xpath_keys_to_key_val(keys)]
except KeyError:
if not create_if_missing:
raise
data = dict(keys)
lst.append(data)
elif isinstance(lst, list):
# regular python list, need to iterate over it
try:
i = _list_find_key_index(keys, lst)
data = lst[i]
except ValueError:
# not found
if not create_if_missing:
raise KeyError(keys) from None
data = dict(keys)
lst.append(data)
else:
raise TypeError("expected a list")
elif create_if_missing:
data = data.setdefault(name, {})
else:
data = data[name] # may raise KeyError
return data | 24bd5e04c5ba8bd7dc4950c3f655241d4d4e8d65 | 3,657,755 |
def r_mediate(y, t, m, x, interaction=False):
"""
This function calls the R function mediate from the package mediation
(https://cran.r-project.org/package=mediation)
y array-like, shape (n_samples)
outcome value for each unit, continuous
t array-like, shape (n_samples)
treatment value for each unit, binary
m array-like, shape (n_samples)
mediator value for each unit, here m is necessary binary and uni-
dimensional
x array-like, shape (n_samples, n_features_covariates)
covariates (potential confounders) values
interaction boolean, default=False
whether to include interaction terms in the model
interactions are terms XT, TM, MX
"""
m = m.ravel()
var_names = [[y, 'y'],
[t, 't'],
[m, 'm'],
[x, 'x']]
df_list = list()
for var, name in var_names:
if len(var.shape) > 1:
var_dim = var.shape[1]
col_names = ['{}_{}'.format(name, i) for i in range(var_dim)]
sub_df = pd.DataFrame(var, columns=col_names)
else:
sub_df = pd.DataFrame(var, columns=[name])
df_list.append(sub_df)
df = pd.concat(df_list, axis=1)
m_features = [c for c in df.columns if ('y' not in c) and ('m' not in c)]
y_features = [c for c in df.columns if ('y' not in c)]
if not interaction:
m_formula = 'm ~ ' + ' + '.join(m_features)
y_formula = 'y ~ ' + ' + '.join(y_features)
else:
m_formula = 'm ~ ' + ' + '.join(m_features +
[':'.join(p) for p in
combinations(m_features, 2)])
y_formula = 'y ~ ' + ' + '.join(y_features +
[':'.join(p) for p in
combinations(y_features, 2)])
robjects.globalenv['df'] = df
mediator_model = Rstats.lm(m_formula, data=base.as_symbol('df'))
outcome_model = Rstats.lm(y_formula, data=base.as_symbol('df'))
res = mediation.mediate(mediator_model, outcome_model, treat='t',
mediator='m', boot=True, sims=1)
relevant_variables = ['tau.coef', 'z1', 'z0', 'd1', 'd0']
to_return = [np.array(res.rx2(v))[0] for v in relevant_variables]
return to_return + [None] | c07afc4c2569566d67e652e8de4a5ee770a49218 | 3,657,756 |
def default_props(reset=False, **kwargs):
"""Return current default properties
Parameters
----------
reset : bool
if True, reset properties and return
default: False
"""
global _DEFAULT_PROPS
if _DEFAULT_PROPS is None or reset:
reset_default_props(**kwargs)
return _DEFAULT_PROPS | 43a7015bd8089082d17ca7625748a8789b3eb52a | 3,657,757 |
import xlrd
def ReadFlatFileNGA(xlsfile):
"""
Generate NGA flatfile dictionary for generate usage
"""
# read in excel flatfile
book = xlrd.open_workbook(xlsfile)
sh = book.sheet_by_index(0) # 'Flatfile' sheet name
keys = sh.row_values(0)
for itmp in range( len(keys) ):
keys[itmp] = keys[itmp].encode('ascii')
# Column names needed ( add more depending on selection criterion )
names_predictors = [ 'Record Sequence Number', 'EQID', # IDs
'Earthquake Magnitude', 'Dip (deg)','Rake Angle (deg)','Dept to Top Of Fault Rupture Model', 'Fault Rupture Width (km)', # source related
'Joyner-Boore Dist. (km)', 'ClstD (km)', 'FW/HW Indicator', 'Source to Site Azimuth (deg)', # source-site pair related
"GMX's C1", 'HP-H1 (Hz)', 'HP-H2 (Hz)', 'LP-H1 (Hz)', 'LP-H2 (Hz)','File Name (Horizontal 1)','File Name (Horizontal 2)', # seismogram related
'Preferred Vs30 (m/s)', 'Measured/Inferred Class', 'Z1 (m)', 'Z1.5 (m)', 'Z2.5 (m)' # site related
]
keys_predictors = ['RecordID', 'EQID',
'Mw', 'dip', 'rake', 'Ztor', 'W',
'Rjb', 'Rrup', 'Fhw', 'azimuth',
'GMX_C1', 'HP1', 'HP2', 'LP1', 'LP2', 'H1','H2',
'Vs30', 'VsFlag', 'Z1.0','Z1.5','Z2.5'
]
Fhwi = {'hw':1,'fw':0,'nu':0,'na':0,'':None} # relate indicators to Fhw flag
# IM related
names_IMs = ['Record Sequence Number', 'PGA (g)', 'PGV (cm/sec)', 'PGD (cm)' ]
keys_IMs = ['RecordID', 'PGA', 'PGV', 'PGD']
periods = []
for ikey, key in enumerate( keys ):
if isinstance( key, str ):
key.encode( 'ascii' )
# key now is one of the column name
if key[0] == 'T' and key[-1] == 'S':
names_IMs.append( key )
keys_IMs.append( 'SA'+key[1:-1] )
periods.append( float(key[1:-1]) )
# colname and colindex map
icol_dictP = {}
icol_dictI = {}
for ikey, key in enumerate( keys ):
if key in names_predictors:
icol_dictP[key] = ikey
if key in names_IMs:
icol_dictI[key] = ikey
nga_flats = {}; nga_IMs = {}
for icol, key in enumerate( names_predictors ):
col0 = sh.col_values(icol_dictP[key])
col0[0] = col0[0].encode('ascii')
if isinstance( col0[1], str ):
if key == 'FW/HW Indicator':
# Fhw string to flag (int)
for irow in range(1, len(col0) ):
col0[irow] = col0[irow].encode('ascii')
col0[irow] = Fhwi[col0[irow]]
else:
for irow in range(1, len(col0) ):
col0[irow] = col0[irow].encode('ascii')
keyP = keys_predictors[icol]
nga_flats[keyP] = col0[1:]
for icol, key in enumerate( names_IMs ):
col0 = sh.col_values(icol_dictI[key])
if isinstance( col0[1], str ):
for irow in range(1, len(col0) ):
col0[irow] = col0[irow].encode('ascii')
keyI = keys_IMs[icol]
nga_IMs[keyI] = col0[1:]
return nga_flats, nga_IMs | 8ed4c66a541cb5fcebce6965c1f9b9a514bea1ae | 3,657,758 |
def _chr_ord(x):
"""
This is a private utility function for getBytesIOString to return
chr(ord(x))
"""
return chr(ord(x)) | 8529686bf3a40cd1f2c32f458ebdba17a9b35a05 | 3,657,759 |
from itertools import groupby
def vkToWchar (m):
""" Mapping from virtual key to character """
ret = []
retTbl = ['/* table of virtual key to wchar mapping tables */',
'static VK_TO_WCHAR_TABLE aVkToWcharTable[] = {']
def generate (n, g, defPrefix=''):
defname = f'aVkToWch{defPrefix}{n}'
ret.extend ([f'/* map virtual key to flags and {n} unicode output characters */',
f'static VK_TO_WCHARS{n} {defname}[] = {{'])
for vk, flags, chars in g:
def toRepr (s):
if s is None:
return WChar.NONE.cdefName
elif len (s) != 1:
# everything else belongs to ligature tables, which we
# don’t support.
raise Exception (f'only single-character strings are supported ({s!r})')
else:
return f'0x{ord (s):04X}u /*{repr (s)}*/'
chars = ', '.join (map (toRepr, chars))
ret.append (f'\t{{{vk.cdefName}, {flags}, {{{chars}}}}},')
ret.extend ([f'\t{{0, 0, {{{("0, "*n)}}}}},', '\t};', ''])
# add the new table
retTbl.append (f'\t{{(PVK_TO_WCHARS1) {defname}, {n}, sizeof({defname}[0])}},')
f = lambda x: len (x[2])
m = groupby (sorted (m, key=f), key=f)
for n, g in m:
generate (n, g)
# We are almost always going to need the numpad keys. They also need to be
# last, so translation from string to virtual key does not map them.
numpad = [
(VirtualKey.NUMPAD0, 0, '0'),
(VirtualKey.NUMPAD1, 0, '1'),
(VirtualKey.NUMPAD2, 0, '2'),
(VirtualKey.NUMPAD3, 0, '3'),
(VirtualKey.NUMPAD4, 0, '4'),
(VirtualKey.NUMPAD5, 0, '5'),
(VirtualKey.NUMPAD6, 0, '6'),
(VirtualKey.NUMPAD7, 0, '7'),
(VirtualKey.NUMPAD8, 0, '8'),
(VirtualKey.NUMPAD9, 0, '9'),
]
generate (1, numpad, 'Num')
retTbl.extend (['\t{NULL, 0, 0},', '\t};'])
return '\n'.join (ret + retTbl) | b0b938720c13ed45c32dcd402f43e93f24aaa111 | 3,657,760 |
def load_glove_vectors(glove_file="/home/yaguang/pretrained_models/glove.6B.50d.txt"):
"""Load the glove word vectors"""
word_vectors = {}
with open(glove_file) as f:
for line in f:
split = line.split()
word_vectors[split[0]] = [float(x) for x in split[1:]]
return word_vectors | a7bb1650885e12f436273b012d0c1c381e1be311 | 3,657,761 |
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split("brain.Event:")
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logger.warn(
(
"Invalid event.proto file_version. Defaulting to use of "
"out-of-order event.step logic for purging expired events."
)
)
return -1 | 43e4cfdc116ae5e687e4c0f6ed55bc8b7518f6b1 | 3,657,762 |
import gzip
def get_file_format(input_files):
"""
Takes all input files and checks their first character to assess
the file format. Returns one of the following strings; fasta, fastq,
other or mixed. fasta and fastq indicates that all input files are
    of the same format, either fasta or fastq. other indicates that the
    files are neither fasta nor fastq files. mixed indicates that the input files
are a mix of different file formats.
"""
# Open all input files and get the first character
file_format = []
invalid_files = []
for infile in input_files:
if is_gzipped(infile):#[-3:] == ".gz":
f = gzip.open(infile, "rb")
fst_char = f.read(1);
else:
f = open(infile, "rb")
fst_char = f.read(1);
f.close()
# Assess the first character
if fst_char == b"@":
file_format.append("fastq")
elif fst_char == b">":
file_format.append("fasta")
else:
invalid_files.append("other")
if len(set(file_format)) != 1:
return "mixed"
return ",".join(set(file_format)) | 901ed1ca81563321eb9a16e6a36fbebb12f3b2ea | 3,657,763 |
def get_nominal_hour(train_num):
"""Get the nominal hour for a train num (most frequent)"""
res = database.get().query("""
SELECT count(*) as count, substr(date, 12, 5) as hour
FROM results WHERE num = '%s'
GROUP BY hour ORDER BY count DESC LIMIT 1;
""" % train_num)
return next(res).hour | c51fba63b03e2aa930399077143ff77e1fe5f485 | 3,657,764 |
from typing import List
import os
def prepare_aggregation_data(group_name: str) -> List[PlotValues]:
"""Constructs and returns learning rate curves
Args:
group_name (str): group name for which to construct the curves
Returns:
A list of `PlotValues`.
"""
group_dir = os.path.join(FLAGS.results_dir, group_name)
# List of tuples (benchmark_name, unitary_accuracy, federated_accuracy)
labels_and_lrs = read_all_task_values(group_dir=group_dir)
assert labels_and_lrs, "No values for group found"
return [
(label, lrs, [i for i in range(1, len(lrs) + 1, 1)])
for label, lrs in labels_and_lrs
] | d94d0241adc2525a899b319d5539978320bfafc9 | 3,657,765 |
import numpy as np
def compute_cgan_metrics(img_y, img_g, i = 0):
"""
Computes accuracy, precision, recall, f1, iou_score for passed image, return None in case of div 0
img_y: ground truth building footprint semantic map
img_g: generated image
i: 0 for entire image, 1 for inner (excluding border)
Note:
image format is (n,n,1) and each pixel is either -1 (for 'no' building at pixel) or 1 (for 'yes' building at pixel)
"""
# image size (it is square), and ring step
iz, rz = int(img_y.shape[0]), int(img_y.shape[0] / (4 * 2))
# building inner square mask (ring) where we calculate metrics
# example of such mask:
# 1 1 1 1
# 1 0 0 1
# 1 0 0 1
# 1 1 1 1
ring = np.ones(img_y.shape, dtype=bool)
ring[i * rz:iz - i * rz, i * rz:iz - i * rz, 0] = False
# now, erasing all areas which are not in ring with 0
img_y[ring] = 0
img_g[ring] = 0
# TP (true positive), TN, FP, FN
TP = np.sum(np.logical_and((img_y == 1), (img_g == 1)))
TN = np.sum(np.logical_and((img_y == -1), (img_g == -1)))
FP = np.sum(np.logical_and((img_y == -1), (img_g == 1)))
FN = np.sum(np.logical_and((img_y == 1), (img_g == -1)))
# IoU (intersection over union)
intersection = np.logical_and((img_y == 1), (img_g == 1))
union = np.logical_or((img_y == 1), (img_g == 1))
if TP + FP == 0 or TP + FN == 0:
return None
# reporting metrics
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
if precision == 0 and recall == 0:
return None
f1 = 2.0 * (precision * recall) / (precision + recall)
iou_score = np.sum(intersection) / np.sum(union)
return accuracy, precision, recall, f1, iou_score | be900b303d333ec1c7233d59779c8006333fba36 | 3,657,766 |
def learning(spiking_neurons, spike_times, taup, taum, Ap, Am, wmax, w_init):
"""
Takes a spiking group of neurons, connects the neurons sparsely with each other, and learns the weight 'pattern' via STDP:
exponential STDP: f(s) = A_p * exp(-s/tau_p) (if s > 0), where s=tpost_{spike}-tpre_{spike}
:param spiking_neurons, spike_times: np.arrays for Brian2's SpikeGeneratorGroup (list of lists created by `generate_spike_train.py`) - spike train used for learning
:param taup, taum: time constant of weight change (in ms)
:param Ap, Am: max amplitude of weight change
:param wmax: maximum weight (in S)
:param w_init: initial weights (in S)
:return weightmx: learned synaptic weights
"""
np.random.seed(12345)
pyrandom.seed(12345)
#plot_STDP_rule(taup/ms, taum/ms, Ap/1e-9, Am/1e-9, "STDP_rule")
PC = SpikeGeneratorGroup(nPCs, spiking_neurons, spike_times*second)
# mimics Brian1's exponentialSTPD class, with interactions='all', update='additive'
# see more on conversion: http://brian2.readthedocs.io/en/stable/introduction/brian1_to_2/synapses.html
STDP = Synapses(PC, PC,
"""
w : 1
dA_presyn/dt = -A_presyn/taup : 1 (event-driven)
dA_postsyn/dt = -A_postsyn/taum : 1 (event-driven)
""",
on_pre="""
A_presyn += Ap
w = clip(w + A_postsyn, 0, wmax)
""",
on_post="""
A_postsyn += Am
w = clip(w + A_presyn, 0, wmax)
""")
STDP.connect(condition="i!=j", p=connection_prob_PC)
STDP.w = w_init
run(400*second, report="text")
weightmx = np.zeros((nPCs, nPCs))
weightmx[STDP.i[:], STDP.j[:]] = STDP.w[:]
return weightmx | 7324a2290e7ec14146be28d2b5a14e10b6ec44e4 | 3,657,767 |
import optparse
def build_arg_parser2():
"""
Build an argument parser using optparse. Use it when python version is 2.5 or 2.6.
"""
usage_str = "Smatch table calculator -- arguments"
parser = optparse.OptionParser(usage=usage_str)
parser.add_option("--fl", dest="fl", type="string", help='AMR ID list file')
parser.add_option("-f", dest="f", type="string", action="callback", callback=cb, help="AMR IDs (at least one)")
parser.add_option("-p", dest="p", type="string", action="callback", callback=cb, help="User list")
parser.add_option("--fd", dest="fd", type="string", help="file directory")
parser.add_option("-r", "--restart", dest="r", type="int", help='Restart number (Default: 4)')
parser.add_option("-v", "--verbose", action='store_true', dest="v", help='Verbose output (Default:False)')
parser.set_defaults(r=4, v=False, ms=False, fd=isi_dir_pre)
return parser | 7118b337bda7b9b170bf3eedf230a5fcd323a17b | 3,657,768 |
def _write_log(path, lines):
"""
:param path: log file path
:param lines: content
:return status:Bool
"""
try:
with open(path, 'w') as file:
logi('open file {log_path} for writting'.format(log_path=path))
file.writelines(lines)
except Exception as e:
loge(e)
return False
return True | f98d87939a23549a82370ce81e7bf28b1ac52b10 | 3,657,769 |
def update_meal_plan(plan_id: str, meal_plan: MealPlan):
""" Updates a meal plan based off ID """
meal_plan.process_meals()
meal_plan.update(plan_id)
# try:
# meal_plan.process_meals()
# meal_plan.update(plan_id)
# except:
# raise HTTPException(
# status_code=404,
# detail=SnackResponse.error("Unable to Update Mealplan"),
# )
return SnackResponse.success("Mealplan Updated") | cf201280d6f89aac5d412af3344e4ba89b29d1ed | 3,657,770 |
def clean_data(df):
"""
INPUT:
df - Panda DataFrame - A data frame that contains the data
OUTPUT:
df - Panda DataFrame - A Cleaned Panda Data frame
"""
#split categories into a data frame and take the first row
cat = df.categories.str.split(';', expand=True)
row = cat.iloc[0]
rew=row.unique()
# Fix columns name
f = []
for x in rew:
r = x[:-2]
f.append(r)
category_colnames = pd.Series(f)
cat.columns = category_colnames
for column in cat:
cat[column] = cat[column].str.strip().str[-1]
# convert column from string to numeric
cat[column] = cat[column].astype('int64')
    # concatenating the categories columns with df and dropping unnecessary values
df = df.drop(['categories'], axis = 1)
df = pd.concat([df, cat], axis=1 )
df = df.drop_duplicates()
df.dropna(how='any')
    return df | 7d19cf395094fe780da6204f10f80528526083d0 | 3,657,771 |
import numpy as np
import open3d as o3d
def vec2transform(v):
"""Convert a pose from 7D vector format ( x y z qx qy qz qw) to transformation matrix form
Args: v pose in 7D vector format
Returns:
T 4x4 transformation matrix
$ rosrun tf tf_echo base os_lidar
- Translation: [-0.084, -0.025, 0.050]
- Rotation: in Quaternion [0.000, 0.000, 0.924, 0.383]
in RPY (radian) [0.000, -0.000, 2.356]
in RPY (degree) [0.000, -0.000, 135.000]
Random quaternion sent by nived in Mattermost
-Rotation: q_BL: [ 0.0122965, -0.002454, 0.9226886, 0.385342]
"""
T_cam_to_os = np.eye(4)
T_cam_to_os[:3, -1] = np.array([-0.084, -0.025, 0.050])
T_cam_to_os[:3, :3] = o3d.geometry.Geometry3D.get_rotation_matrix_from_quaternion(
np.array([0.383, 0.000, 0.000, 0.924])
)
T_os_to_cam = np.linalg.inv(T_cam_to_os)
T = np.eye(4)
T[:3, -1] = v[:3]
T[:3, :3] = o3d.geometry.Geometry3D.get_rotation_matrix_from_quaternion(
np.array([v[6], v[3], v[4], v[5]])
)
return T_os_to_cam @ T @ T_cam_to_os | e3a6023fe2b6bedc7b023d08bb22abd406bb9612 | 3,657,772 |
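A small sanity check, assuming vec2transform and its numpy/open3d imports are available: for the identity pose the fixed camera-to-lidar offsets cancel out.
import numpy as np

identity_pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])  # x y z qx qy qz qw
T = vec2transform(identity_pose)
print(np.allclose(T, np.eye(4)))  # True (up to floating-point precision)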
import re
def cleanup_name_customregex(cname, customregex=None, returnmatches=False):
"""Cleanup the input name given a custom dictionary of regular expressions (format of customregex: a dict like {'regex-pattern': 'replacement'}"""
if customregex is None:
customregex = {'_': ' ',
'repos': '',
'ecg': '',
'[0-9]+': '',
}
matches = set()
# For each pattern
    for pattern, replacement in customregex.items():
# First try to see if there is a match and store it if yes
if returnmatches:
m = re.search(pattern, cname, flags=re.I)
if m:
matches.add(m.group(0))
# Then replace the pattern found
cname = re.sub(pattern, replacement, cname, flags=re.I)
# Return both the cleaned name and matches
if returnmatches:
return (cname, matches)
# Return just the cleaned name
else:
return cname | 15cbaf0cb439ba8aa3a550cc7713e19e34d714d4 | 3,657,773 |
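A usage sketch with the default pattern dictionary (the sample name is made up); exact outputs depend on dict iteration order on very old Python versions.
name = "subject01_ECG_repos"
cleaned, matched = cleanup_name_customregex(name, returnmatches=True)
print(repr(cleaned))  # e.g. 'subject  ' once separators, 'ecg', 'repos' and digits are stripped
print(matched)        # e.g. {'_', 'repos', 'ECG', '01'}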
import math
def compute_recommended_batch_size_for_trustworthy_experiments(C: int, H: int, W: int, safety_val: float) -> int:
"""
Based on inequality with safety_val=s:
N' >= s*D'
the recommended batch size is, assuming N'=B*H*W and D'=C (so considering neurons as filter, patches as data):
B*H*W >= s*C
leading to any batch size B that satisfies:
B >= (s*C)/(H*W)
for the current layer and model. So, C, H, W are for the current model at that layer.
note:
- recommended way to compute this is to get the largest B after plugging in the C, H, W for all the layers of
your model - essentially computing the "worst-case" B needed for the model.
:return:
"""
recommended_batch_size: int = int(math.ceil(safety_val * C / (H * W)))
    assert (recommended_batch_size > 0), 'Batch size that was recommended was not positive; check the inputs you are using.'
return recommended_batch_size | 80f11adb87b252a31aba590c38e60350535025ae | 3,657,774 |
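A usage sketch computing the worst-case recommended batch size over a set of assumed layer shapes.
layer_shapes = [(64, 56, 56), (128, 28, 28), (256, 14, 14), (512, 7, 7)]  # (C, H, W), illustrative
safety_val = 10.0
worst_case = max(
    compute_recommended_batch_size_for_trustworthy_experiments(C, H, W, safety_val)
    for C, H, W in layer_shapes
)
print(worst_case)  # 105, driven by the (512, 7, 7) layer: ceil(10 * 512 / 49)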
import numpy as np
from astropy.stats import mad_std  # assumed source of mad_std (robust MAD-based std)
def filter_gradient(t, h_, n_std=3):
"""Filter outliers by evaluating the derivative.
Take derivative and evaluate outliers in derivative.
"""
h = h_.copy()
# NOTE: This must be a separate step
# dh/dt = 0 -> invalid
dhdt = np.gradient(h)
invalid = np.round(dhdt, 6) == 0.0
dhdt[invalid] = np.nan
invalid = np.isnan(dhdt) | (
np.abs(dhdt - np.nanmedian(dhdt)) > mad_std(dhdt) * n_std
)
if sum(invalid) == 0:
return h
h[invalid] = np.nan
return h | 4e4547f38c2886f33c3b42442ff37b9100dda9c7 | 3,657,775 |
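A usage sketch on synthetic data, assuming filter_gradient and its imports (numpy, astropy's mad_std) are in scope.
import numpy as np

t = np.linspace(0.0, 1.0, 200)
h = 0.5 * t + 0.01 * np.sin(40 * t)
h[100] += 5.0  # inject a single outlier
filtered = filter_gradient(t, h, n_std=3)
print(np.isnan(filtered).sum())  # 2: the samples adjacent to the spike are flagged via the gradient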
import jsonschema
def network_driver_create_endpoint():
"""Creates new Neutron Subnets and a Port with the given EndpointID.
This function takes the following JSON data and delegates the actual
endpoint creation to the Neutron client mapping it into Subnet and Port. ::
{
"NetworkID": string,
"EndpointID": string,
"Options": {
...
},
"Interface": {
"Address": string,
"AddressIPv6": string,
"MacAddress": string
}
}
Then the following JSON response is returned. ::
{
"Interface": {
"Address": string,
"AddressIPv6": string,
"MacAddress": string
}
}
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/docs/remote.md#create-endpoint # noqa
"""
json_data = flask.request.get_json(force=True)
LOG.debug("Received JSON data %s for "
"/NetworkDriver.CreateEndpoint", json_data)
jsonschema.validate(json_data, schemata.ENDPOINT_CREATE_SCHEMA)
endpoint_id = json_data['EndpointID']
neutron_network_identifier = _make_net_identifier(json_data['NetworkID'],
tags=app.tag)
filtered_networks = _get_networks_by_identifier(neutron_network_identifier)
if not filtered_networks:
return flask.jsonify({
'Err': "Neutron net associated with identifier {0} doesn't exist."
.format(neutron_network_identifier)
})
else:
neutron_network_id = filtered_networks[0]['id']
interface = json_data['Interface'] or {} # Workaround for null
interface_cidrv4 = interface.get('Address', '')
interface_cidrv6 = interface.get('AddressIPv6', '')
interface_mac = interface.get('MacAddress', '')
if not interface_cidrv4 and not interface_cidrv6:
return flask.jsonify({
'Err': "Interface address v4 or v6 not provided."
})
neutron_port, subnets = _create_or_update_port(
neutron_network_id, endpoint_id, interface_cidrv4,
interface_cidrv6, interface_mac)
try:
port_driver = get_driver(neutron_port)
(stdout, stderr) = port_driver.create_host_iface(
endpoint_id, neutron_port, subnets, filtered_networks[0])
LOG.debug(stdout)
if stderr:
LOG.error(stderr)
except (exceptions.VethCreationFailure,
exceptions.BindingNotSupportedFailure) as ex:
with excutils.save_and_reraise_exception():
LOG.error('Preparing the veth '
'pair was failed: %s.', ex)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error('Could not bind the Neutron port to '
'the veth endpoint.')
except (exceptions.KuryrException,
n_exceptions.NeutronClientException) as ex:
with excutils.save_and_reraise_exception():
LOG.error('Failed to set up the interface: %s', ex)
if app.vif_plug_is_fatal:
port_active = utils.wait_for_port_active(
app.neutron, neutron_port['id'], app.vif_plug_timeout)
if not port_active:
neutron_port_name = neutron_port['name']
raise exceptions.InactiveResourceException(
"Neutron port {0} did not become active on time."
.format(neutron_port_name))
response_interface = {}
created_fixed_ips = neutron_port['fixed_ips']
subnets_dict_by_id = {subnet['id']: subnet
for subnet in subnets}
if not interface_mac:
response_interface['MacAddress'] = neutron_port['mac_address']
vnic_type = neutron_port.get('binding:vnic_type')
if vnic_type in const.VNIC_TYPES_SRIOV:
response_interface.pop('MacAddress', None)
if not (interface_cidrv4 or interface_cidrv6):
if 'ip_address' in neutron_port:
_process_interface_address(
neutron_port, subnets_dict_by_id, response_interface)
for fixed_ip in created_fixed_ips:
_process_interface_address(
fixed_ip, subnets_dict_by_id, response_interface)
LOG.debug("Response JSON data %s for /NetworkDriver.CreateEndpoint",
{'Interface': response_interface})
return flask.jsonify({'Interface': response_interface}) | 8a2f8fc065064699b60a7854b23cd36844a0ff6d | 3,657,776 |
import SimpleITK as sitk
# label_to_roi / crop_to_roi are assumed to come from the platipy imaging crop utilities
def generate_valve_from_great_vessel(
label_great_vessel,
label_ventricle,
valve_thickness_mm=8,
):
"""
Generates a geometrically-defined valve.
This function is suitable for the pulmonic and aortic valves.
Args:
label_great_vessel (SimpleITK.Image): The binary mask for the great vessel
(pulmonary artery or ascending aorta)
label_ventricle (SimpleITK.Image): The binary mask for the ventricle (left or right)
valve_thickness_mm (int, optional): Valve thickness, in millimetres. Defaults to 8.
Returns:
SimpleITK.Image: The geometric valve, as a binary mask.
"""
# To speed up binary morphology operations we first crop all images
template_img = 0 * label_ventricle
cb_size, cb_index = label_to_roi(
(label_great_vessel + label_ventricle) > 0, expansion_mm=(20, 20, 20)
)
label_ventricle = crop_to_roi(label_ventricle, cb_size, cb_index)
label_great_vessel = crop_to_roi(label_great_vessel, cb_size, cb_index)
# Convert valve thickness to voxels
_, _, res_z = label_ventricle.GetSpacing()
valve_thickness = int(valve_thickness_mm / res_z)
# Dilate the ventricle
label_ventricle_dilate = sitk.BinaryDilate(label_ventricle, (valve_thickness,) * 3)
# Find the overlap
overlap = label_great_vessel & label_ventricle_dilate
# Mask to thinner great vessel
mask = label_great_vessel | label_ventricle_dilate
overlap = sitk.Mask(overlap, mask)
label_valve = sitk.BinaryMorphologicalClosing(overlap)
# Finally, paste back to the original image space
label_valve = sitk.Paste(
template_img,
label_valve,
label_valve.GetSize(),
(0, 0, 0),
cb_index,
)
return label_valve | 4a40ab9513e6093486a914099316e80a8a24f58e | 3,657,777 |
import numpy as np
import scipy as sp
import scipy.optimize  # ensures sp.optimize is available
import matplotlib.pyplot as plt
def current_decay(dataframe,two_components=False):
"""
Fits 95% peak to:
A(t) = A*exp(-t/Taufast) + B*exp(-t/Tauslow) +Iss
Parameters
----------
dataframe : A pandas dataframe
Should be baselined
two_components : True/False
When False, a single exponential component is fitted to the current
decay (B is zero). When True, the sum of two exponential components
is fitted.The default is False.
Returns
-------
A Graph of the current decay with superimposed fit.
Values for fast (and slow, if selected) time constants, or value for single
time constant in mS
"""
# will need to get peak - done
# get steady state
# need to get amplitude of the fast component (peak)
# amplitude of the slower component
    # Currently using an unbinned approach, but a binned approach could also be considered
    peak = get_peak(dataframe,decay=True) # gives component A for both fitting routines
peak_to_baseline = dataframe.loc[peak:,:].mean(axis=1)
# using get_Iss() and get_component (both should return amplitude and time
# Normalising times to time of peak current
peak_to_baseline.index = peak_to_baseline.index-(peak_to_baseline.index[0])
#### get 95% current to baseline
current_at_t = peak_to_baseline[peak_to_baseline > (peak_to_baseline.iloc[0]*0.95)]
# get times
t = np.array(current_at_t.index) #####
# reformat current to numpy.array
current_at_t = np.array(current_at_t)
# get Iss
_,Iss = get_Iss(peak_to_baseline)
# fast component,A, peak amplitude
A = current_at_t[0] #####
# preparing figure
if two_components:
xdata = np.zeros([np.size(t),4])
xdata[:,0] = t
xdata[:,1] = A
xdata[:,2] = Iss
_,B = get_component(peak_to_baseline,'slow') # amplitude of slow component
plt.style.use('ggplot')
decayfig,decayaxs = plt.subplots(1)
decayaxs.set_xlabel("t(ms)")
decayaxs.set_ylabel("I(pA)")
xdata[:,3] = B
xdata = xdata.transpose()
        times = t*10**3 # rescaling to ms
popt,_ = sp.optimize.curve_fit(double_exp_fit,xdata,current_at_t) # popt = Tfast,Tslow
decayaxs.plot(times,double_exp_fit(xdata,popt[0],popt[1]),linestyle="--",color= 'red',label = "fit")
decayaxs.plot(times,current_at_t,color = 'black',label = "data")
decayaxs.set_title("Decay from 95% Ipeak:baseline. Tauf = {}ms,Taus = {}ms".format((popt[0]*10**3),(popt[1]*10**3)))
decayaxs.legend()
decayfig.tight_layout()
return(popt[0]*10**3,popt[1]*10**3)
else:
xdata = np.zeros([np.size(t),3])
xdata[:,0] = t
xdata[:,1] = A
xdata[:,2] = Iss
xdata = xdata.transpose()
plt.style.use('ggplot')
decayfig,decayaxs = plt.subplots(1)
decayaxs.set_xlabel("t(ms)")
decayaxs.set_ylabel("I(pA)")
        times = t*10**3 # rescaling to ms
popt,_ = sp.optimize.curve_fit(exp_fit,xdata,current_at_t) #popt = Tau of single component
decayaxs.plot(times,current_at_t,color = 'black',label = "data")
decayaxs.plot(times,exp_fit(xdata,popt),linestyle="--",color= 'red',label = "fit")
decayaxs.set_title("Decay from 95% Ipeak:baseline. Tau = {}ms".format((popt[0]*10**3)))
decayaxs.legend()
decayfig.tight_layout()
return(popt[0]*10**3) | 048e001b23344b2ac72f6683b0ca1ca66af8dcbd | 3,657,778 |
import ctypes
def get_n_mode_follow(p_state, idx_image=-1, idx_chain=-1):
"""Returns the index of the mode which to follow."""
return int(_MMF_Get_N_Mode_Follow(p_state, ctypes.c_int(idx_image), ctypes.c_int(idx_chain))) | 30a548369b4fd708b3c8c73549b79951c9435667 | 3,657,779 |
import os
import numpy as np
def get_possible_centroid_nodes_from_partial_preprocessing(nw_name):
""" this function returns a list of partially preprocessed nodes to used them as zone systems
(for fast routing) """
nw_path = os.path.join(MAIN_DIR, "data", "networks", nw_name)
ppf = os.path.join(nw_path, "base", "tt_matrix.npy")
if os.path.isfile(ppf):
tt_matrx = np.load(ppf)
return [i for i in range(tt_matrx.shape[0])]
else:
raise FileExistsError("file {} not found! not preprocessed?".format(ppf)) | a965888a6f8439b889429702714b6a3673567658 | 3,657,780 |
def divide(lhs, rhs):
"""Division with auto-broadcasting
Parameters
----------
lhs : tvm.Tensor or Expr
The left operand
rhs : tvm.Tensor or Expr
The right operand
Returns
-------
ret : tvm.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.divide(lhs, rhs) | d4a6f3b28eaa35faa300a64e5a81ac64aa6740cb | 3,657,781 |
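A usage sketch against the TVM TE/TOPI Python API; module paths are assumed for a recent TVM release and may differ in older versions.
from tvm import te, topi

n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((1,), name="B")
C = topi.divide(A, B)  # broadcasting elementwise division, result shape (n,)
print(C.shape)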
import gzip
import pickle
def load(filename):
"""Loads a compressed object from disk
"""
file = gzip.GzipFile(filename, 'rb')
buffer = b''
while True:
data = file.read()
if data == b'':
break
buffer += data
object = pickle.loads(buffer)
file.close()
return object | 8a2b9bff8297fdf2824f962df9202b8e08c8d8a1 | 3,657,782 |
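A round-trip sketch; the save() counterpart below is an assumed mirror of load(), not part of the original snippet.
import gzip
import os
import pickle
import tempfile

def save(obj, filename):
    # mirror of load(): pickle then gzip-compress to disk
    with gzip.GzipFile(filename, 'wb') as f:
        f.write(pickle.dumps(obj))

path = os.path.join(tempfile.mkdtemp(), "data.pkl.gz")
save({"weights": [1, 2, 3]}, path)
print(load(path))  # {'weights': [1, 2, 3]}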
from multiprocessing import Queue
from .rabbitmq import Queue
from .beanstalk import Queue
from .redis_queue import Queue
# NOTE: the backend Queue imports above shadow one another; in the original package they are
# presumably performed lazily inside the matching scheme branch below.
try:
    from urllib import parse as urlparse  # Python 3
except ImportError:
    import urlparse  # Python 2
def connect_message_queue(name, url=None, maxsize=0):
"""
create connection to message queue
name:
name of message queue
rabbitmq:
amqp://username:password@host:5672/%2F
Refer: https://www.rabbitmq.com/uri-spec.html
beanstalk:
beanstalk://host:11300/
redis:
redis://host:6379/db
builtin:
None
"""
if not url:
return Queue(maxsize=maxsize)
parsed = urlparse.urlparse(url)
if parsed.scheme == 'amqp':
return Queue(name, url, maxsize=maxsize)
elif parsed.scheme == 'beanstalk':
return Queue(name, host=parsed.netloc, maxsize=maxsize)
elif parsed.scheme == 'redis':
db = parsed.path.lstrip('/').split('/')
try:
db = int(db[0])
except:
db = 0
return Queue(name, parsed.hostname, parsed.port, db=db, maxsize=maxsize)
raise Exception('unknow connection url: %s', url) | 4cb3ea808de8ffd5c157c4fbbf95e78d1ad2b075 | 3,657,783 |
def load_document(filepath):
"""
Description:Opens and loads the file specified by filepath as a raw txt string; assumes valid text file format.
Input: String -> filepath of file from current directory
Output: Entire contents of text file as a string
"""
#assert(filepath.endswith(".txt")), "Function: Load Document -> File specificed by filepath is not of type .txt"
file = open(filepath, 'r')
file_string = file.read()
file.close()
return file_string | b44a3af09ec7c776a1d3bd1a90efe3deb90da821 | 3,657,784 |
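A minimal round-trip check using a temporary file.
import os
import tempfile

tmp_path = os.path.join(tempfile.mkdtemp(), "sample.txt")
with open(tmp_path, "w") as f:
    f.write("hello world\nsecond line\n")
print(repr(load_document(tmp_path)))  # 'hello world\nsecond line\n'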
def get_user(request, uid):
"""
GET /user/1/
"""
if uid != 1:
return JsonResponse({"code": 10101, "message": "user id null"})
data = {"age": 22, "id": 1, "name": "tom"}
return JsonResponse({"code": 10200, "data": data, "message": "success"}) | 92ef79cf015de5556d1c5715e57cb550a42ef1ca | 3,657,785 |
import collections
import tokenize
def count_ngrams(lines, min_length=1, max_length=3):
"""
Iterate through given lines iterator (file object or list of
lines) and return n-gram frequencies. The return value is a dict
mapping the length of the n-gram to a collections.Counter
object of n-gram tuple and number of times that n-gram occurred.
Returned dict includes n-grams of length min_length to max_length.
"""
lengths = range(min_length, max_length + 1)
ngrams = {length: collections.Counter() for length in lengths}
queue = collections.deque(maxlen=max_length)
# Helper function to add n-grams at start of current queue to dict
def add_queue():
current = tuple(queue)
for length in lengths:
if len(current) >= length:
ngrams[length][current[:length]] += 1
# Loop through all lines and words and add n-grams to dict
for line in lines:
for word in tokenize(line):
queue.append(word)
if len(queue) >= max_length:
add_queue()
# Make sure we get the n-grams at the tail end of the queue
while len(queue) > min_length:
queue.popleft()
add_queue()
return ngrams | 514a313b96d139c3483dc07999f60d1abafd0d91 | 3,657,786 |
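A usage sketch. The record above imports the stdlib tokenize module, but count_ngrams actually expects a callable word tokenizer; a simple whitespace tokenizer is assumed here.
def tokenize(line):
    # minimal stand-in for the project's tokenizer
    return line.lower().split()

lines = [
    "the quick brown fox",
    "the quick brown dog",
]
ngrams = count_ngrams(lines, min_length=1, max_length=2)
print(ngrams[2].most_common(2))  # [(('the', 'quick'), 2), (('quick', 'brown'), 2)]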
def notas(*n, sit = False):
"""
-> Função para analisar notas e situação de vários alunos.
:param n: notas (uma ou mais)
:param sit: situação (valor opcional)
:return: dicionário com várias informaçoes sobre o aluno.
"""
r = {}
r['total'] = len(n)
r['maior'] = max(n)
r['menor'] = min(n)
r['média'] = sum(n) / len(n)
if sit:
if r['média'] >= 7:
r['situação'] = 'boa'
elif r['média'] >= 5:
r['situação'] = 'razoavél'
else:
r['situação'] = 'ruim'
return r | f151c90e22e5eac1c69b59240906e7bf55943321 | 3,657,787 |
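A usage sketch for the grade-summary helper above.
result = notas(8.5, 6.0, 9.0, sit=True)
print(result)
# {'total': 3, 'maior': 9.0, 'menor': 6.0, 'média': 7.833333333333333, 'situação': 'boa'}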
import numpy
def accuracy(output, labels_test):
"""How many correct predictions?"""
TP, TN, FP, FN = confusionMatrix(labels_test, numpy.sign(output))
return float(TP + TN) / (TP + TN + FP + FN) | f3801bbe3590e9d271403795d23a737d642fbed8 | 3,657,788 |
def metric_pairs(request):
"""Pairs of (dask-ml, sklearn) accuracy metrics.
* accuracy_score
"""
return (
getattr(dask_ml.metrics, request.param),
getattr(sklearn.metrics, request.param)
) | e82b799c06e41c4fea19cd33e9d836b1b03d02df | 3,657,789 |
import time
import datetime
def MyDuration(duration, initial_time=None):
"""
Usecase:
a timestamp is provided as when an access token expires,
then add it to the current time, then showing it as a human-readable
future time.
        Alternatively, specify *initial_time* as a manual "now" value.
Args
duration: <type 'int'> OR <type 'str'> Duration in seconds.
If given as a string, convert to int.
        initial_time: <type 'int'> OR <type 'str'> Time to start the difference
            calculation from. If given as a string, convert to int.
If not set, use current time.
Returns
out_time: what time will it be after number seconds in have elapsed.
Shows in format '2016-12-11 15:40:00' if printed.
"""
duration = int(duration)
if initial_time:
initial_time = int(initial_time)
else:
initial_time = time.time() # use current time
in_time = initial_time + duration # add duration to start time
out_time = datetime.datetime.fromtimestamp(in_time)
return out_time | d81a12295c2715ab9ed93595ccee2af2474c671e | 3,657,790 |
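A usage sketch, assuming the function above (with import datetime) is in scope; note that fromtimestamp() converts to local time.
print(MyDuration(3600, initial_time=0))
# e.g. 1970-01-01 01:00:00 in a UTC locale; the wall-clock value shifts with the local timezone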
import numpy as np
def orthogonal_init(shape, gain=1.0):
"""Generating orthogonal matrix"""
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_rows, num_cols)
# Generate a random matrix
a = np.random.normal(size=flat_shape).astype(np.float32)
# Compute the qr factorization
q, r = np.linalg.qr(a, mode='reduced')
# Make Q uniform
square_len = np.minimum(num_rows, num_cols)
d = np.diagonal(r[:square_len, :square_len])
ph = d / np.absolute(d)
q *= ph
# Pad zeros to Q (if rows smaller than cols)
if num_rows < num_cols:
padding = np.zeros([num_rows, num_cols - num_rows], dtype=np.float32)
q = np.concatenate([q, padding], 1)
return gain * np.reshape(q, shape) | 4d7b1f81a13228e4185d59e0fd23ba629888a232 | 3,657,791 |
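A quick orthogonality check for a dense-layer shape and a conv-kernel shape.
W = orthogonal_init((128, 64))
print(np.allclose(W.T @ W, np.eye(64), atol=1e-4))  # True: columns are orthonormal

K = orthogonal_init((3, 3, 16, 32))
K2d = K.reshape(-1, 32)
print(np.allclose(K2d.T @ K2d, np.eye(32), atol=1e-4))  # True after flattening leading dims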
def application(env, start_response):
"""The callable function per the WSGI spec; PEP 333"""
headers = {x[5:].replace('_', '-'):y for x, y in env.items() if x.startswith('HTTP_')}
if env.get('CONTENT_TYPE', None):
headers['Content-Type'] = env['CONTENT_TYPE']
if env.get('CONTENT_LENGTH', None):
headers['Content-Length'] = env['CONTENT_LENGTH']
headers.pop('CONNECTION', None) # let RelayQuery choose to use keepalives or not
body = env['wsgi.input']
uri = env.get('PATH_INFO', '')
if not uri:
# Some WSGI servers use RAW_URI instead of PATH_INFO.
# Gunicorn uses PATH_INFO, gevent.pywsgi.WSGIServer uses RAW_URI
uri = env.get('RAW_URI', '')
token = env.get('HTTP_X_AUTH', '').encode()
host, tls, port = router.get_host(uri=uri, token=token)
if env.get('QUERY_STRING', None):
uri += '?{}'.format(env['QUERY_STRING'])
resp = RelayQuery(host=host,
method=env['REQUEST_METHOD'],
uri=uri,
headers=headers,
body=body,
port=port,
tls=tls)
start_response(resp.status, resp.headers)
return resp | 1c5cbb62316b4170bbd54ef8fa404eccc32b8441 | 3,657,792 |
import numpy as np
def angle_normalize(x):
    """
    Wrap an angle in radians to the range [-pi, pi)
    """
    return ((x + np.pi) % (2 * np.pi)) - np.pi | 0c39dcf67a5aae2340a65173e6a866e429b4d176 | 3,657,793
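Two quick checks of the wrapping behaviour.
print(angle_normalize(3 * np.pi / 2))   # -1.5707963... (i.e. -pi/2)
print(angle_normalize(-3 * np.pi / 2))  #  1.5707963... (i.e.  pi/2)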
import torch
def load_data(BASE_DIR, DATA_DIR):
"""
Loads data necessary for project
Arguments:
BASE_DIR (str) -- path to working dir
DATA_DIR (str) -- path to KEGG data
Returns:
tla_to_mod_to_kos (defaultdict of dicts) -- maps tla to series of dicts, keys are KEGG modules and values are lists of KOs in that module (e.g.: 'eun': {'M00001': ['K00845', etc]}, etc} etc})
mod_sets (defaultdict) -- raw data from KEGG defining which KOs are in each module
tla_to_tnum (dict) -- for each genome, converts tla to tnum
tnum_to_tla (dict) -- for each genome, converts tnum to tla
keepers (list) -- KEGG genomes selected for inclusion in this study
tnum_to_kos (dict) -- maps tnums to KOs encoded by that genome, e.g.: 'T00001': [K00001, ... 'K0000N']
n_kos_tot (int) -- total number of KOs in the dataset
all_kos (list) -- list of all KOs in the dataset
mod_to_ko_clean (dict )-- the functions of many modules can be "completed" by different sets of genes. Here we choose to represent each module by the most common set of genes. Dict maps each module (e.g.: 'K00001') to a list of genes (e.g.: ['K00845', ..., 'K00873'])
train_data (numpy.ndarray) -- training data. Rows are genomes, columns are genes/KOs. 1's denote presence of a gene in the genome, 0's denote absence
test_data (numpy.ndarray) -- test data. Rows are genomes, columns are genes/KOs. 1's denote presence of a gene in the genome, 0's denote absence
train_genomes (list) -- tnums of genomes in the training set
test_genomes (list) -- tnums of genomes in the test set
"""
tla_to_mod_to_kos, mod_sets = load_mods(DATA_DIR) # path to dir with tla_to_mod_to_kos.pkl
tla_to_tnum, tnum_to_tla, keepers = genomes2include(DATA_DIR)
tnum_to_kos, n_kos_tot, all_kos = load_kos(tla_to_tnum, tnum_to_tla, tla_to_mod_to_kos, DATA_DIR)
mod_to_kos = create_mod_to_kos(tla_to_mod_to_kos)
mod_to_ko_clean = clean_kos(mod_sets)
all_kos = torch.load(BASE_DIR+"all_kos_2020-09-29.pt")
tla_to_mod_to_kos = torch.load(BASE_DIR+"tla_to_mod_to_kos_2020-09-29.pt")
train_data = torch.load(BASE_DIR+"kegg_v2_train_2020-09-29.pt")
test_data = torch.load(BASE_DIR+"kegg_v2_test_2020-09-29.pt")
train_genomes = torch.load(BASE_DIR+"kegg_v2_train_genomes_2020-09-29.pt")
test_genomes = torch.load(BASE_DIR+"kegg_v2_test_genomes_2020-09-29.pt")
return tla_to_mod_to_kos, mod_sets, tla_to_tnum, tnum_to_tla, keepers, tnum_to_kos, n_kos_tot, all_kos, mod_to_ko_clean, all_kos, train_data, test_data, train_genomes, test_genomes | d32a6be8cae02ac6ab5903f055326f27d0a549c5 | 3,657,794 |
def disable_app(app, base_url=DEFAULT_BASE_URL):
"""Disable App.
Disable an app to effectively remove it from your Cytoscape session without having to uninstall it.
Args:
app (str): Name of app
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: {'appName': <name of app>}, and is returned whether or not app exists
Raises:
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> disable_app('stringApp')
{'appName': 'stringApp'}
"""
verify_supported_versions(1, 3.7, base_url=base_url)
res = commands.commands_post(f'apps disable app="{app}"', base_url=base_url)
return narrate(res) | 2a2a00609b0acb6090219d24fcc7d24ed8f7dc7e | 3,657,795 |
import pandas as pd
def wrangle_adni():
"""This function returns three dataframes.
Unpack the dataframes when calling the function.
"""
# ensure pandas availability for the function
    if 'pd' not in globals():
        import pandas as pd
# read in the data to a pandas dataframe
adni_full = pd.read_csv('ADNIMERGE.csv', dtype='object')
# set the logical orders for the two diagnoses
DX = ['CN', 'MCI', 'AD']
DX_bl = ['CN', 'SMC', 'EMCI', 'LMCI', 'AD']
# initialize empty dataframe
adni = pd.DataFrame()
# convert datatypes to categorical, datetime, and int
adni.loc[:, 'PTGENDER'] = pd.Categorical(adni_full.PTGENDER)
adni.loc[:, 'DX'] = pd.Categorical(adni_full.DX, ordered=True, categories=DX)
adni.loc[:, 'DX_bl'] = pd.Categorical(adni_full.DX_bl, ordered=True, categories=DX_bl)
adni.loc[:, 'EXAMDATE'] = pd.to_datetime(adni_full['EXAMDATE'])
adni.loc[:, 'EXAMDATE_bl'] = pd.to_datetime(adni_full['EXAMDATE_bl'])
adni.loc[:, 'PTEDUCAT'] = adni_full.PTEDUCAT.astype('int')
adni.loc[:, 'Month'] = adni_full.Month.astype('int')
adni.loc[:, 'RID'] = adni_full.RID.astype('int')
# create a list of float data columns, loop and assign float dtypes
floats = ['AGE', 'CDRSB', 'ADAS11', 'ADAS13', 'MMSE', 'RAVLT_immediate', 'Hippocampus',
'Ventricles', 'WholeBrain', 'Entorhinal', 'MidTemp', 'FDG', 'AV45']
# loop and assign dtypes
for i in floats:
adni.loc[:, i] = adni_full[i].astype('float')
# age has no baseline '_bl' equivalent
if i == 'AGE':
continue
# every other column has a '_bl' equivalent to convert as well
else:
y = i + '_bl'
adni.loc[:, y] = adni_full[y].astype('float')
# drop columns with too much missing data
adni.drop(labels=['FDG', 'FDG_bl', 'AV45', 'AV45_bl'], axis='columns', inplace=True)
# set the index
adni.set_index(adni.RID, inplace=True)
# sort the index
adni.sort_index(inplace=True)
# remove redundant columns
adni.drop('RID', axis='columns', inplace=True)
# calculate dynamic age
adni.loc[:, 'AGE_dynamic'] = adni.AGE + (adni.Month / 12)
# create dataframe with only patients that have complete data
adni_rmv = adni.dropna(how='any')
# filter those results to only patients with multiple visits
num_comp_exams = adni_rmv.groupby('RID')['EXAMDATE_bl'].count()
adni_comp_filter = num_comp_exams[num_comp_exams > 1]
adni_comp = adni_rmv.loc[adni_comp_filter.index]
# map baseline diagnosis categories to match subsequent diagnosis categories
# map new column for DX_bl to categorize based on subsequent DX categories
# 'SMC' -> 'CN' due to medical definitions
# combine 'LMCI' and 'EMCI' into 'MCI'
mapper = {'SMC': 'CN', 'LMCI': 'MCI', 'EMCI': 'MCI', 'CN': 'CN', 'AD': 'AD'}
adni_comp.loc[:, 'DX_bl2'] = adni_comp.DX_bl.map(mapper)
# isolate clinical data
clin_cols = ['EXAMDATE', 'EXAMDATE_bl', 'Month', 'PTGENDER', 'DX', 'DX_bl', 'PTEDUCAT', 'AGE', 'AGE_dynamic',
'CDRSB', 'CDRSB_bl', 'ADAS11', 'ADAS11_bl', 'ADAS13', 'ADAS13_bl', 'MMSE',
'MMSE_bl', 'RAVLT_immediate', 'RAVLT_immediate_bl', 'DX_bl2']
clin_data = pd.DataFrame()
clin_data = adni.reindex(columns=clin_cols)
# filter the scan data
scan_cols = ['EXAMDATE', 'EXAMDATE_bl', 'Month', 'PTGENDER', 'DX', 'DX_bl', 'PTEDUCAT', 'AGE', 'AGE_dynamic',
'Hippocampus', 'Hippocampus_bl', 'Ventricles', 'Ventricles_bl', 'WholeBrain', 'WholeBrain_bl',
'Entorhinal', 'Entorhinal_bl', 'MidTemp', 'MidTemp_bl', 'DX_bl2']
scan_data = pd.DataFrame()
scan_data = adni.reindex(columns=scan_cols)
return adni_comp, clin_data, scan_data | 58fb853731bcafdd297cd211242b043da365b2dc | 3,657,796 |
def readByte (file):
""" Read a byte from file. """
    return ord(file.read(1)) | 4e82d1b688d7742fd1dd1025cd7ac1ccb13bbca0 | 3,657,797
import json
import requests
def qa_tfserving(data_input, url):
""" tf-serving 一整套流程 """
bert_input = covert_text_to_id(data_input)
data = json.dumps(bert_input)
r = requests.post(url, data)
r_text_json = json.loads(r.text)
r_post = postprocess(r_text_json)
return r_post | 1192982521dedda82ccf4aaa1bd1a3b29f445371 | 3,657,798 |
import sqlalchemy as sa
def contract_creation_exceptions():
"""
Return create exceptions.
These elements depend on the networksegments table which was renamed
in the contract branch.
"""
return {
sa.Table: ['segmenthostmappings'],
sa.Index: ['segmenthostmappings']
} | 842a7d604c1211d629d28533dbd8bdb6df451f49 | 3,657,799 |