content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def get_image_ids(idol_id):
"""Returns all image ids an idol has."""
c.execute("SELECT id FROM groupmembers.imagelinks WHERE memberid=%s", (idol_id,))
all_ids = {'ids': [current_id[0] for current_id in c.fetchall()]}
return all_ids | 036dfdc9d757b2a7c70b26653252bbb5e180a5f1 | 22,154 |
def sortarai(datablock, s, Zdiff, **kwargs):
"""
sorts data block in to first_Z, first_I, etc.
Parameters
_________
    datablock : Pandas DataFrame with Thellier-Thellier type data
s : specimen name
Zdiff : if True, take difference in Z values instead of vector difference
NB: this should always be False
**kwargs :
version : data model. if not 3, assume data model = 2.5
Returns
_______
araiblock : [first_Z, first_I, ptrm_check,
ptrm_tail, zptrm_check, GammaChecks]
field : lab field (in tesla)
"""
if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
dec_key, inc_key = 'dir_dec', 'dir_inc'
Mkeys = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
meth_key = 'method_codes'
temp_key, dc_key = 'treat_temp', 'treat_dc_field'
dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
# convert dataframe to list of dictionaries
datablock = datablock.to_dict('records')
else:
dec_key, inc_key = 'measurement_dec', 'measurement_inc'
Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
'measurement_magn_mass', 'measurement_magnitude']
meth_key = 'magic_method_codes'
temp_key, dc_key = 'treatment_temp', 'treatment_dc_field'
dc_theta_key, dc_phi_key = 'treatment_dc_field_theta', 'treatment_dc_field_phi'
first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
field, phi, theta = "", "", ""
starthere = 0
Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M = [], [], [], [], []
ISteps, ZSteps, PISteps, PZSteps, MSteps = [], [], [], [], []
GammaChecks = [] # comparison of pTRM direction acquired and lab field
rec = datablock[0]
for key in Mkeys:
if key in list(rec.keys()) and rec[key] != "":
momkey = key
break
# first find all the steps
for k in range(len(datablock)):
rec = datablock[k]
temp = float(rec[temp_key])
methcodes = []
tmp = rec[meth_key].split(":")
for meth in tmp:
methcodes.append(meth.strip())
if 'LT-T-I' in methcodes and 'LP-TRM' not in methcodes and 'LP-PI-TRM' in methcodes:
Treat_I.append(temp)
ISteps.append(k)
if field == "":
field = float(rec[dc_key])
if phi == "":
phi = float(rec[dc_phi_key])
theta = float(rec[dc_theta_key])
# stick first zero field stuff into first_Z
if 'LT-NO' in methcodes:
Treat_Z.append(temp)
ZSteps.append(k)
if 'LT-T-Z' in methcodes:
Treat_Z.append(temp)
ZSteps.append(k)
if 'LT-PTRM-Z' in methcodes:
Treat_PZ.append(temp)
PZSteps.append(k)
if 'LT-PTRM-I' in methcodes:
Treat_PI.append(temp)
PISteps.append(k)
if 'LT-PTRM-MD' in methcodes:
Treat_M.append(temp)
MSteps.append(k)
if 'LT-NO' in methcodes:
dec = float(rec[dec_key])
inc = float(rec[inc_key])
st = float(rec[momkey])
first_I.append([273, 0., 0., 0., 1])
first_Z.append([273, dec, inc, st, 1]) # NRM step
for temp in Treat_I: # look through infield steps and find matching Z step
if temp in Treat_Z: # found a match
istep = ISteps[Treat_I.index(temp)]
irec = datablock[istep]
methcodes = []
tmp = irec[meth_key].split(":")
for meth in tmp:
methcodes.append(meth.strip())
# take last record as baseline to subtract
brec = datablock[istep - 1]
zstep = ZSteps[Treat_Z.index(temp)]
zrec = datablock[zstep]
# sort out first_Z records
if "LP-PI-TRM-IZ" in methcodes:
ZI = 0
else:
ZI = 1
dec = float(zrec[dec_key])
inc = float(zrec[inc_key])
st = float(zrec[momkey])
first_Z.append([temp, dec, inc, st, ZI])
# sort out first_I records
try:
idec = float(irec[dec_key])
iinc = float(irec[inc_key])
istr = float(irec[momkey])
except TypeError as ex:
raise Exception('Malformed data of some sort for dec/inc/moment in measurement: {}. You must fix this before proceeding.\n Bad record: {}'.format(irec.get('measurement', ''), irec))
X = dir2cart([idec, iinc, istr])
BL = dir2cart([dec, inc, st])
I = []
for c in range(3):
I.append((X[c] - BL[c]))
if I[2] != 0:
iDir = cart2dir(I)
if Zdiff == 0:
first_I.append([temp, iDir[0], iDir[1], iDir[2], ZI])
else:
first_I.append([temp, 0., 0., I[2], ZI])
gamma = angle([iDir[0], iDir[1]], [phi, theta])
else:
first_I.append([temp, 0., 0., 0., ZI])
gamma = 0.0
# put in Gamma check (infield trm versus lab field)
if 180. - gamma < gamma:
gamma = 180. - gamma
GammaChecks.append([temp - 273., gamma])
for temp in Treat_PI: # look through infield steps and find matching Z step
step = PISteps[Treat_PI.index(temp)]
rec = datablock[step]
dec = float(rec[dec_key])
inc = float(rec[inc_key])
st = float(rec[momkey])
brec = datablock[step - 1] # take last record as baseline to subtract
pdec = float(brec[dec_key])
pinc = float(brec[inc_key])
pint = float(brec[momkey])
X = dir2cart([dec, inc, st])
prevX = dir2cart([pdec, pinc, pint])
I = []
for c in range(3):
I.append(X[c] - prevX[c])
dir1 = cart2dir(I)
if Zdiff == 0:
ptrm_check.append([temp, dir1[0], dir1[1], dir1[2]])
else:
ptrm_check.append([temp, 0., 0., I[2]])
# in case there are zero-field pTRM checks (not the SIO way)
for temp in Treat_PZ:
step = PZSteps[Treat_PZ.index(temp)]
rec = datablock[step]
dec = float(rec[dec_key])
inc = float(rec[inc_key])
st = float(rec[momkey])
brec = datablock[step - 1]
pdec = float(brec[dec_key])
pinc = float(brec[inc_key])
pint = float(brec[momkey])
X = dir2cart([dec, inc, st])
prevX = dir2cart([pdec, pinc, pint])
I = []
for c in range(3):
I.append(X[c] - prevX[c])
dir2 = cart2dir(I)
zptrm_check.append([temp, dir2[0], dir2[1], dir2[2]])
# get pTRM tail checks together -
for temp in Treat_M:
# tail check step - just do a difference in magnitude!
step = MSteps[Treat_M.index(temp)]
rec = datablock[step]
st = float(rec[momkey])
if temp in Treat_Z:
step = ZSteps[Treat_Z.index(temp)]
brec = datablock[step]
pint = float(brec[momkey])
# X=dir2cart([dec,inc,st])
# prevX=dir2cart([pdec,pinc,pint])
# I=[]
# for c in range(3):I.append(X[c]-prevX[c])
# d=cart2dir(I)
# ptrm_tail.append([temp,d[0],d[1],d[2]])
# difference - if negative, negative tail!
ptrm_tail.append([temp, 0, 0, st - pint])
else:
print(
s, ' has a tail check with no first zero field step - check input file! for step', temp - 273.)
#
# final check
#
if len(first_Z) != len(first_I):
print(len(first_Z), len(first_I))
print(" Something wrong with this specimen! Better fix it or delete it ")
input(" press return to acknowledge message")
araiblock = (first_Z, first_I, ptrm_check,
ptrm_tail, zptrm_check, GammaChecks)
return araiblock, field | 5127b293b63a0d67da9e0ec21ff3085beea5a836 | 22,155 |
import json
def _extract_then_dump(hex_string: str) -> str:
"""Extract compressed content json serialized list of paragraphs."""
return json.dumps(
universal_extract_paragraphs(
unpack(bytes.fromhex(hex_string))
)
) | 6be6f045ff86580e5d33239e8b8232f5c343723b | 22,157 |
import base64
import hmac
import hashlib
from urllib.parse import urlencode
def sso_redirect_url(nonce, secret, email, external_id, username, name, avatar_url, is_admin, **kwargs):
"""
    nonce: returned by sso_validate()
    secret: the secret key you entered into the Discourse SSO settings
    email: email address of the user who logged in
    external_id: the internal id of the logged-in user
    username: username of the logged-in user
    name: full name of the logged-in user
    avatar_url: URL of the user's avatar
    is_admin: whether the user should be flagged as a Discourse admin
    return value: URL to redirect users back to Discourse, now logged in as username
"""
    kwargs.update({
        'nonce': nonce,
        'email': email,
        'external_id': external_id,
        'username': username,
        'name': name,
        'avatar_url': avatar_url,
        'avatar_force_update': 'true',
        'admin': is_admin
    })
    uencode = urlencode(kwargs)
    # base64.encodestring was removed in Python 3.9; encodebytes is the drop-in replacement
    return_payload = base64.encodebytes(uencode.encode())
    h = hmac.new(secret.encode(), return_payload, digestmod=hashlib.sha256)
    query_string = urlencode({'sso': return_payload, 'sig': h.hexdigest()})
return '/session/sso_login?%s' % query_string | 7aefb1c67a23f0c1311ce4297760a13ea39769fe | 22,158 |
def normalized_cluster_entropy(cluster_labels, n_clusters=None):
""" Cluster entropy normalized by the log of the number of clusters.
Args:
        cluster_labels (list/np.ndarray): Cluster labels
        n_clusters (int, optional): Number of clusters; inferred from the labels if omitted
Returns:
float: Shannon entropy / log(n_clusters)
"""
if n_clusters is None:
n_clusters = len(np.unique(cluster_labels))
counts = np.unique(cluster_labels, return_counts=True)[1]
return entropy(counts) / np.log(n_clusters) | aaed21c7cd91e1b61eb8a8336978eded8673960e | 22,159 |
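A quick usage sketch for the entropy helper above. It assumes `numpy` and `scipy.stats.entropy` are imported as `np` and `entropy` in the module that defines the function (the snippet itself relies on them); the labels are made up for illustration.
import numpy as np
from scipy.stats import entropy  # assumed source of the `entropy` used above

balanced = np.array([0, 0, 1, 1, 2, 2])
print(normalized_cluster_entropy(balanced))        # 1.0 -- three equally sized clusters
print(normalized_cluster_entropy([0, 0, 0, 1]))    # about 0.81 -- unbalanced clusters score lower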
def ingest_data(data, schema=None, date_format=None, field_aliases=None):
"""
data: Array of Dictionary objects
schema: PyArrow schema object or list of column names
date_format: Pandas datetime format string (with schema only)
field_aliases: dict mapping Json field names to desired schema names
return: a PyArrow Batch
"""
if isinstance(schema, list) and isinstance(field_aliases, dict):
return _convert_data_with_column_names_dict(data, field_aliases)
elif isinstance(schema, dict):
return _convert_data_with_column_names_dict(data, schema)
elif isinstance(schema, list):
return _convert_data_with_column_names(data, schema)
elif isinstance(schema, pa.Schema):
return _convert_data_with_schema(data, schema, date_format=date_format, field_aliases=field_aliases)
else:
return _convert_data_without_schema(data) | 94ac4c13a71275485a79404f20c702886b39fb1c | 22,160 |
def build_messages(missing_scene_paths, update_stac):
""" """
message_list = []
error_list = []
for path in missing_scene_paths:
landsat_product_id = str(path.strip("/").split("/")[-1])
if not landsat_product_id:
error_list.append(
f"It was not possible to build product ID from path {path}"
)
message_list.append(
{
"Message": {
"landsat_product_id": landsat_product_id,
"s3_location": str(path),
"update_stac": update_stac,
}
}
)
return {"message_list": message_list, "failed": error_list} | 07865a38fad6e3f642d3e55b80fac734dbb7d94b | 22,161 |
def DecrementPatchNumber(version_num, num):
"""Helper function for `GetLatestVersionURI`.
DecrementPatchNumber('68.0.3440.70', 6) => '68.0.3440.64'
Args:
version_num(string): version number to be decremented
num(int): the amount that the patch number need to be reduced
Returns:
string: decremented version number
"""
version_num_list = version_num.split('.')
version_num_list[-1] = str(int(version_num_list[-1]) - num)
assert int(version_num_list[-1]) >= 0, 'patch number cannot be negative'
return '.'.join(version_num_list) | e42585791063d7982675065be7480ae7b5ea637d | 22,162 |
def hi_means(steps, edges):
"""This applies kmeans in a hierarchical fashion.
:param edges:
:param steps:
    :returns: a tuple of two arrays, ``kmeans_history`` containing a number of
    arrays of varying lengths and ``labels_history``, an array of length equal
to edges.shape[0]
"""
sub_edges = edges
kmeans_history = []
labels_history = []
    for _ in range(steps):
        kmeans = nkm.kmeans(sub_edges.shape[0] // 2, sub_edges)
sub_edges = kmeans[0]
kmeans_history += [kmeans[0]]
labels_history += [kmeans[1]]
kmeans_history = np.array(kmeans_history)
labels_history = np.array(labels_history)
return kmeans_history, labels_history | 60d242f1ed9a4009bac706053e56f0d450ca7a7a | 22,163 |
def tag_item(tag_name, link_flag=False):
"""
    Returns items tagged with tag_name,
    e.g. tag_name='django' will return items tagged 'django'.
"""
    print(C3 % ("\n_TAGGED RESULTS_"))
PAYLOAD["tag"] = tag_name
res = requests.post(
GET_URL, data=json.dumps(PAYLOAD), headers=HEADERS, verify=False)
if res.json()['status'] == 2:
        print(C3 % ("Invalid tag: Tag not found!"))
exit()
return render(res.json()['list'], link_flag=link_flag) | 038b6a81dec1ea7c6fb9c4d83eaa6425d950c2fd | 22,164 |
def movie_info(tmdb_id):
"""Renders salient movie data from external API."""
# Get movie info TMDB database.
print("Fetching movie info based on tmdb id...")
result = TmdbMovie.get_movie_info_by_id(tmdb_id)
# TMDB request failed.
if not result['success']:
print("Error!")
# Can't find movie referenced by id.
if result['status_code'] == 404:
abort(404)
else:
# Some other error, e.g. 429: too many request.
err_message = f"TMDB API query failed; HTTP response = {result['status_code']}"
return render_template("errors/misc-error.html",
err_message=err_message)
# Collect movie object.
movie = result['movie']
# To check a user's personal movie list, user must be logged in.
# Also, limiting the fetching of NYT movie reviews to authenticated users.
# This will speed up display of movie info for anonymous users as NYT review
# fetching requires time delays between API requests.
# See whether movie is already on user's list.
on_user_list, film_list_item_id = False, None
# Get search-engine queries for movie.
search_engines = {
'Google': movie.get_query('google'),
'DuckDuckGo': movie.get_query('duckduckgo')
}
if current_user.is_authenticated:
# CHECK PERSONAL MOVIE LIST!!!
print(f"Checking whether '{movie.title}' on user list...")
film = FilmListItem.query.filter_by(tmdb_id=tmdb_id,
user_id=current_user.id).first()
if film:
on_user_list = True
film_list_item_id = film.id
# on_user_list = True if film else False
print(f"On user list? {on_user_list}, id: {film_list_item_id}")
return render_template("movie.html",
movie=movie,
on_user_list=on_user_list,
search_engines=search_engines) | ee7188eddcc50d0114ae5b80bc753e803632d557 | 22,165 |
def diabetic(y, t, ui, dhat):
"""
Expanded Bergman Minimal model to include meals and insulin
Parameters for an insulin dependent type-I diabetic
States (6):
In non-diabetic patients, the body maintains the blood glucose
level at a range between about 3.6 and 5.8 mmol/L (64.8 and
104.4 mg/dL with 1:18 conversion between mmol/L and mg/dL)
:param y: input state
:param t: time step
:param ui: Insulin infusion rate (mU/min)
:param dhat: Meal disturbance (mmol/L-min)
:return: change in states
"""
g = y[0] # blood glucose (mg/dL)
x = y[1] # remote insulin (micro-u/ml)
i = y[2] # plasma insulin (micro-u/ml)
q1 = y[3] # S1
q2 = y[4] # S2
g_gut = y[5] # gut blood glucose (mg/dl)
# Parameters:
gb = 291.0 # (mg/dL) Basal Blood Glucose
p1 = 3.17e-2 # 1/min
p2 = 1.23e-2 # 1/min
si = 2.9e-2 # 1/min * (mL/micro-U)
ke = 9.0e-2 # 1/min Insulin elimination from plasma
kabs = 1.2e-2 # 1/min t max,G inverse
kemp = 1.8e-1 # 1/min t max,I inverse
f = 8.00e-1 # L
vi = 12.0 # L Insulin distribution volume
vg = 12.0 # L Glucose distibution volume
# Compute ydot:
dydt = np.empty(6)
dydt[0] = -p1 * (g - gb) - si * x * g + f * kabs / vg * g_gut + f / vg * dhat # (1)
dydt[1] = p2 * (i - x) # remote insulin compartment dynamics (2)
dydt[2] = -ke * i + ui # plasma insulin concentration (3)
dydt[3] = ui - kemp * q1 # two-part insulin absorption model dS1/dt
dydt[4] = -kemp * (q2 - q1) # two-part insulin absorption model dS2/dt
dydt[5] = kemp * q2 - kabs * g_gut
# convert from minutes to hours
dydt = dydt * 60
return dydt | 35949f9a3d6010e89346ebb7f2818230ca6148a0 | 22,166 |
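A minimal simulation sketch for the ODE system above. It assumes `numpy` is available as `np` (as the function already requires) and pulls in `scipy.integrate.odeint` purely for the example; the infusion rate and time horizon are arbitrary choices, not values from the source.
import numpy as np
from scipy.integrate import odeint  # added here only for the example

y0 = [291.0, 0.0, 0.0, 0.0, 0.0, 0.0]   # start at basal glucose with no insulin or meal in transit
t = np.linspace(0.0, 10.0, 101)         # 10 hours; the model rescales rates to per-hour
ui, dhat = 3.0, 0.0                     # constant insulin infusion, no meal disturbance
trajectory = odeint(diabetic, y0, t, args=(ui, dhat))
print(trajectory[-1, 0])                # blood glucose at the end of the run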
def get_spacy_sentences(doc_text):
"""
Split given document into its sentences
:param doc_text: Text to tokenize
:return: list of spacy sentences
"""
doc = _get_spacy_nlp()(doc_text)
return list(doc.sents) | 30345e04add02fd74e8470ce667fee60ddc7140d | 22,167 |
def get_recommendations(commands_fields, app_pending_changes):
"""
:param commands_fields:
:param app_pending_changes:
:return: List of object describing command to run
>>> cmd_fields = [
... ['cmd1', ['f1', 'f2']],
... ['cmd2', ['prop']],
... ]
>>> app_fields = {
... 'f2': {'field': 'f2', 'user': 'api', 'updated': '00:00'}
... }
>>> from pprint import pprint
>>> pprint(get_recommendations(cmd_fields, app_fields))
[{'command': 'cmd1', 'field': 'f2', 'updated': '00:00', 'user': 'api'}]
"""
recommended_cmds = []
for cmd in commands_fields:
cmd_name = cmd[0]
cmd_fields = cmd[1]
for field in cmd_fields:
if field in app_pending_changes.keys():
recommended_cmds.append({
'command': cmd_name,
'field': field,
'user': app_pending_changes[field]['user'],
'updated': app_pending_changes[field]['updated'],
})
break
return recommended_cmds | 03fa583a5d4ea526cfeaa671418488218e1b227f | 22,168 |
import collections
def file_based_convert_examples_to_features(examples,
label_list,
max_seq_length,
tokenizer,
output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
def create_int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close() | efb6a0d347ae3a99a5859cbd3e0c1216d09377e6 | 22,169 |
def hello():
"""Return a friendly HTTP greeting."""
return 'Hello World!!!' | ae3528d10f94c92c169f53d5c3897572c9032bc2 | 22,170 |
from typing import Union
from typing import List
from typing import Tuple
def _findStress(
syllables: Union[List[Syllable], List[List[str]]]
) -> Tuple[List[int], List[int]]:
"""Find the syllable and phone indicies for stress annotations"""
tmpSyllables = [_toSyllable(syllable) for syllable in syllables]
stressedSyllables: List[int] = []
stressedPhones: List[int] = []
for syllableI, syllable in enumerate(tmpSyllables):
for phoneI, phone in enumerate(syllable.phonemes):
if "ˈ" in phone:
stressedSyllables.insert(0, syllableI)
stressedPhones.insert(0, phoneI)
break
if "ˌ" in phone:
stressedSyllables.append(syllableI)
stressedPhones.append(phoneI)
return stressedSyllables, stressedPhones | d4adc4b6156e4d823640a29815d75832c21f68ac | 22,171 |
from hydrus.data.helpers import get_path_from_type
import random
import string
def gen_dummy_object(class_title, doc):
"""
Create a dummy object based on the definitions in the API Doc.
:param class_title: Title of the class whose object is being created.
:param doc: ApiDoc.
:return: A dummy object of class `class_title`.
"""
object_ = {
"@type": class_title
}
expanded_base_url = DocUrl.doc_url
for class_path in doc.collections:
if class_title == doc.collections[class_path]["collection"].name:
members = list()
manages_class_titles = list()
collection_manages = doc.collections[class_title]["collection"].manages
if type(collection_manages) is dict:
# only one manages block
manages_class = collection_manages['object'].split(expanded_base_url)[1]
manages_class_titles.append(manages_class)
elif type(collection_manages) is list:
# multiple manages block
for manages_block in collection_manages:
                    manages_class = manages_block['object'].split(expanded_base_url)[1]
manages_class_titles.append(manages_class)
for _ in range(3):
member_class = random.choice(manages_class_titles)
member = gen_dummy_object(member_class, doc)
member_id = crud.insert(object_=member,
session=get_session(),
collection=False)
member_class_path = get_path_from_type(member_class)
member_api_path = f'/{get_api_name()}/{member_class_path}/{member_id}'
members.append({
"@id": member_api_path,
"@type": member_class,
})
object_['members'] = members
return object_
for class_path in doc.parsed_classes:
if class_title == doc.parsed_classes[class_path]["class"].title:
for prop in doc.parsed_classes[class_path]["class"].supportedProperty:
if prop.write is False:
continue
if isinstance(prop.prop, HydraLink):
object_[prop.title] = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(6))
pass
elif expanded_base_url in prop.prop:
prop_class = prop.prop.split(expanded_base_url)[1]
object_[prop.title] = gen_dummy_object(prop_class, doc)
else:
type_ = prop.kwargs.get('range')
if type_ is not None:
object_[prop.title] = random.randint(50,100)
else:
object_[prop.title] = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(6))
return object_ | 88f096a483699b9126496564cfe755386012acce | 22,172 |
def gather_info(arguments) -> Info:
"""Gather info."""
if arguments.integration:
info = {"domain": arguments.integration}
elif arguments.develop:
print("Running in developer mode. Automatically filling in info.")
print()
info = {"domain": "develop"}
else:
info = _gather_info(
{
"domain": {
"prompt": "What is the domain?",
"validators": [
CHECK_EMPTY,
[
"Domains cannot contain spaces or special characters.",
lambda value: value == slugify(value),
],
],
}
}
)
info["is_new"] = not (COMPONENT_DIR / info["domain"] / "manifest.json").exists()
if not info["is_new"]:
return _load_existing_integration(info["domain"])
if arguments.develop:
info.update(
{
"name": "Develop Hub",
"codeowner": "@developer",
"requirement": "aiodevelop==1.2.3",
"oauth2": True,
}
)
else:
info.update(gather_new_integration(arguments.template == "integration"))
return Info(**info) | 4cc527373fe29b36526388716f2402101039cea2 | 22,173 |
def __is_geotagging_input(question_input, _):
"""Validates the specified geotagging input configuration.
A geotagging input configuration contains the following optional fields:
- location: a string that specifies the input's initial location.
Args:
question_input (dict): An input configuration to validate.
Returns:
<bool, str|None>: A pair containing the value True if the specified configuration
is valid, False otherwise; as well as an error message in case it is invalid.
"""
location = question_input.get("location")
if location is not None:
message = "A geotagging input's 'location' field must be a non-empty string."
try:
if is_empty_string(location):
return (False, message)
except TypeError:
return (False, message)
return (True, None) | 8fc392f832cc6d5c38eb5ffd2e7e20ff50b4ffd3 | 22,176 |
def _be_num_input(num_type, than, func=_ee_num_input, text='', error_text="Enter number great or equal than ",
error_text_format_bool=True,
error_text_format="Enter number great or equal than {}", pause=True, pause_text_bool=True,
pause_text='Press Enter...', clear=True, error_text_input="Enter number!",
pause_input=True, pause_input_text_bool=True, pause_input_text=True, clear_input=True,
error_text_bool=True, error_text_input_bool=True, sleep_bool=True, sleep_time=1,
sleep_text_bool=True, sleep_format_text_bool=True, sleep_text="Sleeping time",
sleep_format_text="Sleep for {} seconds!",
sleep_bool_input=True, sleep_time_input=1, sleep_text_bool_input=True,
sleep_format_text_bool_input=True,
sleep_text_input="Sleeping time", sleep_format_text_input="Sleep for {} seconds!"):
"""
    :param func: function that performs the input; its result is returned
    :param error_text_format_bool: whether to show the formatted error text
    :param error_text_format: formatted error text
    :param error_text_input_bool: whether to show the error text when the input itself is invalid
    :param num_type: numeric type to read
    :param text: prompt text shown for the input, default ''
    :param error_text_bool: whether to show the error text
    :param error_text: text shown when error_text_bool is True
    :param pause: whether to pause for a while
    :param pause_text_bool: whether to show text while pausing
    :param pause_text: text shown while pausing
    :param clear: whether to clear the console
    :param than: number that the entered value must be greater than or equal to
    :param error_text_input: like error_text, but for invalid input
    :param pause_input: like pause, but for invalid input
    :param pause_input_text_bool: like pause_text_bool, but for invalid input
    :param pause_input_text: like pause_text, but for invalid input
    :param clear_input: whether to clear the console on invalid input
    :param sleep_format_text: formatted sleep text
    :param sleep_text: sleep text
    :param sleep_format_text_bool: if True, show sleep_format_text, otherwise show sleep_text
    :param sleep_time: time to sleep
    :param sleep_text_bool: if True, show text while sleeping
    :param sleep_bool: if True, sleep the program for sleep_time
    :param sleep_format_text_input: formatted sleep text for invalid input
    :param sleep_text_input: sleep text for invalid input
    :param sleep_format_text_bool_input: if True, show sleep_format_text_input, otherwise show sleep_text_input
    :param sleep_time_input: time to sleep on invalid input
    :param sleep_text_bool_input: if True, show text while sleeping on invalid input
    :param sleep_bool_input: if True, sleep the program for sleep_time_input on invalid input
    :return: the entered number
"""
return func(num_type=num_type, eq='<=<=', than=than, text=text, error_text=error_text,
error_text_format_bool=error_text_format_bool,
error_text_format=error_text_format, pause=pause, pause_text_bool=pause_text_bool,
pause_text=pause_text, clear=clear, error_text_input=error_text_input,
pause_input=pause_input, pause_input_text_bool=pause_input_text_bool,
pause_input_text=pause_input_text, clear_input=clear_input,
error_text_bool=error_text_bool, error_text_input_bool=error_text_input_bool,
sleep_bool_input=sleep_bool_input,
sleep_time_input=sleep_time_input, sleep_text_bool_input=sleep_text_bool_input,
sleep_format_text_bool_input=sleep_format_text_bool_input,
sleep_text_input=sleep_text_input, sleep_format_text_input=sleep_format_text_input,
sleep_bool=sleep_bool,
sleep_time=sleep_time, sleep_text_bool=sleep_text_bool, sleep_format_text_bool=sleep_format_text_bool,
sleep_text=sleep_text, sleep_format_text=sleep_format_text) | 92f8d3c119b0c331c50c4213644003c62534deb9 | 22,177 |
def createParetoFig(_pareto_df,_bestPick):
"""
    Initialize figure and axes objects using pyplot for the Pareto curve
Parameters
----------
_pareto_df : Pandas DataFrame
DataFrame from Yahoo_fin that contains all the relevant options data
_bestPick : Pandas Series
Option data for the best pick given the user input settings
Returns
-------
pareto_fig : matplotlib figure object
figure used to plot the stockPareto data from the _pareto_df input
pareto_ax : matplotlib axes object
axes object that holds the stockPareto data from _pareto_df input
plotted using pandas integrated matplotlib .plot function
"""
pareto_fig = Figure(figsize=(6,6), dpi=100)
pareto_ax = pareto_fig.add_subplot(111)
pareto_ax.set_title('Pareto Curve of Available Options in DOW JONES Index')
_pareto_df.plot.scatter(x='POP',y='Potential Gain Multiple Contracts', ax = pareto_ax)
pareto_ax.set_xlabel('Probability of Profit (%)')
pareto_ax.set_ylabel('Potential Gain ($)')
# ax = finalFrame.plot(kind = 'scatter', x='POP',y='Potential Gain Multiple Contracts')
pareto_ax.axvline(_bestPick['POP'], color='green', ls='--')
pareto_ax.axhline(_bestPick['Potential Gain Multiple Contracts'], color='green', ls='--')
return pareto_fig, pareto_ax | dc6d8e6566c8c12d9938bf57a22c569124b65895 | 22,178 |
def rem4(rings, si):
"""finds if the silicon atom is within a 4 membered ring"""
for i in range(len(rings)):
triangles = 0
distances = []
locations = []
for n in range(len(rings[i]) - 1):
for m in range(1, len(rings[i]) - n):
distances.append(distance(rings[i][n], rings[i][n + m]))
locations.append([n, n + m])
locations.append(len(rings[i]))
for n in range(2):
del locations[distances.index(max(distances))]
del distances[distances.index(max(distances))]
for n in range(len(locations)):
triangles += triarea(rings[i][locations[n][0]],
rings[i][locations[n][1]], si)
if ringarea(rings[i]) == triangles:
return"n"
return"y" | e38fb7e1349597fa79cd21d39ecb222f39de86b3 | 22,179 |
def ilogit(x):
"""Return the inverse logit"""
return exp(x) / (1.0 + exp(x)) | 064383b65c2e8b011d2a6cd6ce14bf7936dd3178 | 22,180 |
def get_out_of_bounds_func(limits, bounds_check_type="cube"):
"""returns func returning a boolean array, True for param rows that are out of bounds"""
if bounds_check_type == "cube":
def out_of_bounds(params):
""" "cube" bounds_check_type; checks each parameter independently"""
return ~np.alltrue(
np.logical_and(limits[0] <= params, params <= limits[1]), axis=-1
)
else:
raise ValueError(
f'Only "cube" bounds checks are currently supported; You selected {bounds_check_type}'
)
return out_of_bounds | 56c82efd63afdb36fc4d60cda7912fd9a4edb1d0 | 22,181 |
from typing import Dict
from typing import Set
def inspectors_for_each_mode(lead_type="lead_inspector") -> Dict[str, Set[str]]:
"""
We want to be able to group lead inspectors by submode.
"""
if lead_type not in ["lead_inspector", "deputy_lead_inspector"]:
raise ValueError("Can only query for lead_inspector and deputy_lead_inspector attributes.")
submodes = Submode.objects.all()
out = {}
for sm in submodes:
insp = set()
orgs = sm.organisation_set.all()
for org in orgs:
insp.add(getattr(org, lead_type))
insp = {x for x in insp if x is not None}
out[sm.descriptor] = insp
del insp
return out | b712e82a14ddc8153c0ce61ebd3e95faca5993ae | 22,182 |
import tables
def is_hdf_file(f):
"""Checks if the given file object is recognized as a HDF file.
:type f: str | tables.File
:param f: The file object. Either a str object holding the file name or
a HDF file instance.
"""
if((isinstance(f, str) and (f[-4:] == '.hdf' or f[-3:] == '.h5')) or
(isinstance(f, tables.File))
):
return True
return False | 55d89b0d1afdf2acf705d5266e2c44f6d3901c2e | 22,184 |
def dummy_receivers(request, dummy_streamers):
"""Provides `acquire.Receiver` objects for dummy devices.
Either constructs by giving source ID, or by mocking user input.
"""
receivers = {}
for idx, (_, _, source_id, _) in enumerate(dummy_streamers):
with mock.patch('builtins.input', side_effect=str(idx)):
receiver = request.param(source_id=source_id, autostart=False)
receivers[source_id] = receiver
def teardown():
for sid, receiver in receivers.items():
receiver.stop()
del(receiver)
request.addfinalizer(teardown)
return receivers | 70d8e0f7c4f84f949bdc73a4fd240684d86baaed | 22,185 |
def construct_reverse_protocol(splitting="OVRVO"):
"""Run the steps in the reverse order, and for each step, use the time-reverse of that kernel."""
step_length = make_step_length_dict(splitting)
protocol = []
for step in splitting[::-1]:
transition_density = partial(reverse_kernel(step_mapping[step]), dt=step_length[step])
protocol.append(transition_density)
return protocol | 7526e1cab43eeb3ef58a1ac4a673bf9a9992e287 | 22,187 |
def tabinv(xarr, x):
"""
Find the effective index in xarr of each element in x.
The effective index for each element j in x is the value i such that
:math:`xarr[i] <= x[j] <= xarr[i+1]`, to which is added an interpolation fraction
based on the size of the intervals in xarr.
Parameters
----------
    xarr : array-like
The array of values to search
x : float or array-like
Value (or list of values) to look for in x_arr
Returns
-------
ieff : float
Effective index
"""
npoints, npt = len(xarr), len(xarr) - 1
if npoints <= 1:
raise ValueError("Search array must contain at least 2 elements")
if not (np.all(np.diff(xarr) >= 0) or (np.all(np.diff(xarr) <= 0))):
raise ValueError("Search array must be monotonic")
if not isinstance(x, (list, tuple, np.ndarray)):
x = np.array([x])
# ieff contains values j1, ..., jn such that
# ji = x where xarr[x-1] <= ji < xarr[x]
# If no position is found, ji = len(xarr)
ieff = np.searchsorted(xarr, x, side='right').astype(np.float64)
g = np.where((ieff >= 0) & (ieff < (len(xarr) - 1)))
    if len(g) > 0 and len(g[0]) > 0:
neff = ieff[g].astype(np.int32)
x0 = xarr[neff].astype(np.float64)
diff = x[g] - x0
ieff[g] = neff + diff / (xarr[neff+1] - x0)
ieff = np.where(ieff>0., ieff, 0.)
return ieff | c020222355e4d7671e7117c30e3babf0fa1d1f46 | 22,188 |
import configparser
from time import sleep
def getldapconfig():
    """ Return the current LDAP configuration."""
    cfg = configparser.ConfigParser()
    cfg.read(srv_path)
    try:
        return (cfg.get('Ldap', 'ldap_address'),
            cfg.get('Ldap', 'ldap_username'),
            cfg.get('Ldap', 'ldap_password').replace("$percent", "%"),
            cfg.get('Ldap', 'ldap_base'))
    except Exception:
        sleep(0.4)
return getldapconfig() | 505c7d3728b986811a86841003cd763912c82a93 | 22,189 |
def rename_dict_key(_old_key, _new_key, _dict):
"""
renames a key in a dict without losing the order
"""
return { key if key != _old_key else _new_key: value for key, value in _dict.items()} | ddc497796e0e52677afdf09b7f4995cf3a534cbc | 22,190 |
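A small illustration of the claim that insertion order is preserved (the dictionary below is hypothetical):
settings = {"host": "localhost", "prot": 8080, "debug": True}
print(rename_dict_key("prot", "port", settings))
# {'host': 'localhost', 'port': 8080, 'debug': True}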
def api_browse_use_case() -> use_cases.APIBrowseUseCase:
"""Get use case instance."""
return use_cases.APIBrowseUseCase(items_repository) | 653525c9338bf8f520014a530228512fae1ed03d | 22,191 |
def get_descriptive_verbs(tree, gender):
"""
Returns a list of verbs describing pronouns of the given gender in the given dependency tree.
:param tree: dependency tree for a document, output of **generate_dependency_tree**
:param gender: `Gender` to search for usages of
:return: List of verbs as strings
"""
verbs = []
for sentence in tree:
for triple in sentence:
if triple[1] == "nsubj" and (triple[0][1] == "VBD" or triple[0][1] == "VB"
or triple[0][1] == "VBP" or triple[0][1] == "VBZ"):
if triple[2][0] in gender.identifiers:
verbs.append(triple[0][0])
return verbs | ec021c89cb59da2ff8abb0169ea2567cb2e3a13c | 22,193 |
def client():
"""Return a client instance"""
return Client('192.168.1.1') | 19cda306e37e7a34395b86010fb4331a238a6cbc | 22,194 |
import termios, fcntl, sys, os
def read_single_keypress():
"""Waits for a single keypress on stdin.
This is a silly function to call if you need to do it a lot because it has
to store stdin's current setup, setup stdin for reading single keystrokes
then read the single keystroke then revert stdin back after reading the
keystroke.
Returns a tuple of characters of the key that was pressed - on Linux,
pressing keys like up arrow results in a sequence of characters. Returns
('\x03',) on KeyboardInterrupt which can happen when a signal gets
handled.
"""
fd = sys.stdin.fileno()
# save old state
flags_save = fcntl.fcntl(fd, fcntl.F_GETFL)
attrs_save = termios.tcgetattr(fd)
# make raw - the way to do this comes from the termios(3) man page.
attrs = list(attrs_save) # copy the stored version to update
# iflag
attrs[0] &= ~(
termios.IGNBRK
| termios.BRKINT
| termios.PARMRK
| termios.ISTRIP
| termios.INLCR
| termios.IGNCR
| termios.ICRNL
| termios.IXON
)
# oflag
attrs[1] &= ~termios.OPOST
# cflag
attrs[2] &= ~(termios.CSIZE | termios.PARENB)
attrs[2] |= termios.CS8
# lflag
attrs[3] &= ~(
termios.ECHONL | termios.ECHO | termios.ICANON | termios.ISIG | termios.IEXTEN
)
termios.tcsetattr(fd, termios.TCSANOW, attrs)
# turn off non-blocking
fcntl.fcntl(fd, fcntl.F_SETFL, flags_save & ~os.O_NONBLOCK)
# read a single keystroke
ret = []
try:
ret.append(sys.stdin.read(1)) # returns a single character
fcntl.fcntl(fd, fcntl.F_SETFL, flags_save | os.O_NONBLOCK)
c = sys.stdin.read(1) # returns a single character
while len(c) > 0:
ret.append(c)
c = sys.stdin.read(1)
except KeyboardInterrupt:
ret.append("\x03")
finally:
# restore old state
termios.tcsetattr(fd, termios.TCSAFLUSH, attrs_save)
fcntl.fcntl(fd, fcntl.F_SETFL, flags_save)
return tuple(ret) | 646c04bc5441557064714716087a9893c7fa66dc | 22,196 |
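A brief interactive sketch of how the helper might be called; it only works on a Unix terminal, since the function drives termios/fcntl directly.
print("Press any key...")
keys = read_single_keypress()
print("You pressed:", repr(keys))   # e.g. ('a',) or ('\x1b', '[', 'A') for the up arrow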
def set_table(table, fold_test, inner_number_folds, index_table, y_name):
""" Set the table containing the data information
Set the table by adding to each entry (patient) its start and end indexes in the concatenated data object.
In fact each patients i is composed by `n_i` tiles so that for example patient 0 will have as starts and ends indices 0 and `n_0`.
It then separates the dataset into test and train sets (according to `fold_test`).
Finally, several splits of the train sets are done for cross validation, preserving relative class frequency.
Obviously, dataset is shuffled, and splitted at the patient level, so that the indexes returned are the table indexes,
not the concatenated object indexes.
Parameters
----------
table : pd.DataFrame
data information.
fold_test : int
number of the fold which will be used for testing.
inner_number_folds : int
number of splits used in the cross validation.
index_table : dict
maps each file (key) to its start and end index in the data object (concatenated encoded bags)
y_name : str
or "y_interest", is the name of the target variable.
Returns
-------
pd.DataFrame, list(tuple), list
returns 1: the table DataFrame augmented with start and end indexes
2: The `inner_number_folds` splits for cross_validation, each containing (list(train_indexes), list(val_indexes)).
3: List containing indexes of the test dataset.
"""
## add index_table to table so that all the info is in table
table = add_index(table, index_table)
train_table = table[table["fold"] != fold_test]
test_index = table[table["fold"] == fold_test].index
stratified_variable = train_table[y_name].round(0)
skf = StratifiedKFold(n_splits=inner_number_folds, shuffle=True) # Assures that relative class frequency is preserve in each folds.
obj = skf.split(train_table.index, stratified_variable)
# index_folds = [(train_index, val_index) for train_index, val_index in obj]
index_folds = [(np.array(train_table.index[train_index]), np.array(train_table.index[val_index])) for train_index, val_index in obj]
# import pdb; pdb.set_trace()
return table, index_folds, test_index | d46c0601b59f27a60ec99a5305113d94893ba748 | 22,197 |
from argparse import ArgumentParser
def parse_args():
    """
    Parse command-line arguments.
    """
argparser = ArgumentParser()
argparser.add_argument(
"-b",
"--bucket-name",
help="S3 bucket name",
)
argparser.add_argument(
"-d",
"--days",
type=int,
help="Number of days",
)
return argparser.parse_args() | 36302c14466a2c1a1791217566c49687fc55b567 | 22,198 |
def predict(file):
"""
Returns values predicted
"""
x = load_img(file, target_size=(WIDTH, HEIGHT))
x = img_to_array(x)
x = np.expand_dims(x, axis=0)
array = NET.predict(x)
result = array[0]
answer = np.argmax(result)
return CLASSES[answer], result | 98ce2d770d7bffc47e6fe84521fd6992ab2a53fa | 22,199 |
def sample_categorical(pmf):
"""Sample from a categorical distribution.
Args:
      pmf: Probability mass function. Output of a softmax over categories.
Array of shape [batch_size, number of categories]. Rows sum to 1.
Returns:
idxs: Array of size [batch_size, 1]. Integer of category sampled.
"""
if pmf.ndim == 1:
pmf = np.expand_dims(pmf, 0)
batch_size = pmf.shape[0]
cdf = np.cumsum(pmf, axis=1)
rand_vals = np.random.rand(batch_size)
idxs = np.zeros([batch_size, 1])
for i in range(batch_size):
idxs[i] = cdf[i].searchsorted(rand_vals[i])
return idxs | 5b270e63bb5e290a97cacede9bd0f8bf34fc0ecf | 22,200 |
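A short usage sketch, assuming `numpy` is imported as `np` in the defining module (the function already depends on it); seeding makes the draw reproducible.
import numpy as np

np.random.seed(0)
pmf = np.array([[0.1, 0.2, 0.7],
                [0.5, 0.5, 0.0]])
print(sample_categorical(pmf))   # shape (2, 1); each row holds the sampled category index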
def make_Dex_3D(dL, shape, bloch_x=0.0):
""" Forward derivative in x """
Nx, Ny , Nz= shape
phasor_x = np.exp(1j * bloch_x)
Dex = sp.diags([-1, 1, phasor_x], [0, Nz*Ny, -Nx*Ny*Nz+Nz*Ny], shape=(Nx*Ny*Nz, Nx*Ny*Nz))
Dex = 1 / dL * sp.kron(sp.eye(1),Dex)
return Dex | 1d3a47624f180d672f43fb65082f29727b42f720 | 22,201 |
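A quick shape check for the derivative operator above, assuming `np` is numpy and `sp` is scipy.sparse as the calls suggest: on an Nx by Ny by Nz grid the operator is a square sparse matrix over all grid points.
import numpy as np
import scipy.sparse as sp   # assumed binding for the `sp` used above

Dex = make_Dex_3D(dL=0.1, shape=(4, 4, 4))
print(Dex.shape)            # (64, 64) -- one row and one column per grid point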
def feature_decoder(proto_bytes):
"""Deserializes the ``ProtoFeature`` bytes into Python.
Args:
proto_bytes (bytes): The ProtoBuf encoded bytes of the ProtoBuf class.
Returns:
:class:`~geopyspark.vector_pipe.Feature`
"""
pb_feature = ProtoFeature.FromString(proto_bytes)
return from_pb_feature(pb_feature) | ff7cdc6c0d7f056c69576af2a5b5eb98f57266af | 22,202 |
async def calculate_board_fitness_report(
board: list, zone_height: int, zone_length: int
) -> tuple:
"""Calculate Board Fitness Report
This function uses the general solver functions api to calculate and return all the different collisions on a given board array
representation.
Args:
board (list): A full filled board representation.
zone_height (int): The zones height.
zone_length (int): The zones length.
Returns:
int: Total collisions on the board.
int: Total collisions on the board columns.
int: Total collisions on the board rows.
int: Total collisions on the board zones.
"""
body = {"zoneHeight": zone_height, "zoneLength": zone_length, "board": board}
url = str(environ["FITNESS_REPORT_SCORE_LINK"])
response_body = dict()
headers = {"Authorization": api_key, "Content-Type": "application/json"}
async with ClientSession(headers=headers) as session:
async with session.post(url=url, json=body) as response:
response_body = await response.json()
return (
response_body["totalCollisions"],
response_body["columnCollisions"],
response_body["rowCollisions"],
response_body["zoneCollisions"],
) | a770863c044a4c4452860f9fccf99428dbfb5013 | 22,203 |
def quote_fqident(s):
"""Quote fully qualified SQL identifier.
The '.' is taken as namespace separator and
all parts are quoted separately
Example:
>>> quote_fqident('tbl')
'public.tbl'
>>> quote_fqident('Baz.Foo.Bar')
'"Baz"."Foo.Bar"'
"""
tmp = s.split('.', 1)
if len(tmp) == 1:
return 'public.' + quote_ident(s)
return '.'.join(map(quote_ident, tmp)) | 26cf409a09d2e8614ac4aba04db1eee6cac75f08 | 22,204 |
def row_generator(x, H, W, C):
"""Returns a single entry in the generated dataset.
Return a bunch of random values as an example."""
return {'frame_id': x,
'frame_data': np.random.randint(0, 10,
dtype=np.uint8, size=(H, W, C))} | e99c6e8b1557890b6d20ea299bc54c0773ea8ade | 22,205 |
def accuracy(output, target, topk=(1,)):
"""Computes the precor@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)  # reshape copes with a non-contiguous slice from the transpose above
res.append(correct_k.mul_(100.0 / batch_size))
return res | 80e73c907e57b9666a8f399b8ed655c919d79abb | 22,206 |
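A quick check of the helper above, assuming it lives in a PyTorch code base (the `.topk`/`.eq` tensor methods it uses are PyTorch's); the logits and targets are invented for the example.
import torch

output = torch.tensor([[0.1, 0.9, 0.0],
                       [0.8, 0.1, 0.1]])
target = torch.tensor([0, 0])
top1, top2 = accuracy(output, target, topk=(1, 2))
print(top1.item(), top2.item())   # 50.0 100.0 -- sample 0 is only correct within the top 2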
def define_model(input_shape, output_shape, FLAGS):
"""
Define the model along with the TensorBoard summaries
"""
data_format = "channels_last"
concat_axis = -1
n_cl_out = 1 # Number of output classes
dropout = 0.2 # Percentage of dropout for network layers
num_datapoints = input_shape[0]
imgs = tf.placeholder(tf.float32,
shape=([None] + list(input_shape[1:])))
msks = tf.placeholder(tf.float32,
shape=([None] + list(output_shape[1:])))
inputs = K.layers.Input(tensor=imgs, name="Images")
params = dict(kernel_size=(3, 3), activation="relu",
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
trans_params = dict(kernel_size=(2, 2), strides=(2, 2),
data_format=data_format,
kernel_initializer="he_uniform",
padding="same")
conv1 = K.layers.Conv2D(name="conv1a", filters=32, **params)(inputs)
conv1 = K.layers.Conv2D(name="conv1b", filters=32, **params)(conv1)
pool1 = K.layers.MaxPooling2D(name="pool1", pool_size=(2, 2))(conv1)
conv2 = K.layers.Conv2D(name="conv2a", filters=64, **params)(pool1)
conv2 = K.layers.Conv2D(name="conv2b", filters=64, **params)(conv2)
pool2 = K.layers.MaxPooling2D(name="pool2", pool_size=(2, 2))(conv2)
conv3 = K.layers.Conv2D(name="conv3a", filters=128, **params)(pool2)
# Trying dropout layers earlier on, as indicated in the paper
conv3 = K.layers.Dropout(dropout)(conv3)
conv3 = K.layers.Conv2D(name="conv3b", filters=128, **params)(conv3)
pool3 = K.layers.MaxPooling2D(name="pool3", pool_size=(2, 2))(conv3)
conv4 = K.layers.Conv2D(name="conv4a", filters=256, **params)(pool3)
# Trying dropout layers earlier on, as indicated in the paper
conv4 = K.layers.Dropout(dropout)(conv4)
conv4 = K.layers.Conv2D(name="conv4b", filters=256, **params)(conv4)
pool4 = K.layers.MaxPooling2D(name="pool4", pool_size=(2, 2))(conv4)
conv5 = K.layers.Conv2D(name="conv5a", filters=512, **params)(pool4)
conv5 = K.layers.Conv2D(name="conv5b", filters=512, **params)(conv5)
if FLAGS.use_upsampling:
up = K.layers.UpSampling2D(name="up6", size=(2, 2))(conv5)
else:
up = K.layers.Conv2DTranspose(name="transConv6", filters=256,
**trans_params)(conv5)
up6 = K.layers.concatenate([up, conv4], axis=concat_axis)
conv6 = K.layers.Conv2D(name="conv6a", filters=256, **params)(up6)
conv6 = K.layers.Conv2D(name="conv6b", filters=256, **params)(conv6)
if FLAGS.use_upsampling:
up = K.layers.UpSampling2D(name="up7", size=(2, 2))(conv6)
else:
up = K.layers.Conv2DTranspose(name="transConv7", filters=128,
**trans_params)(conv6)
up7 = K.layers.concatenate([up, conv3], axis=concat_axis)
conv7 = K.layers.Conv2D(name="conv7a", filters=128, **params)(up7)
conv7 = K.layers.Conv2D(name="conv7b", filters=128, **params)(conv7)
if FLAGS.use_upsampling:
up = K.layers.UpSampling2D(name="up8", size=(2, 2))(conv7)
else:
up = K.layers.Conv2DTranspose(name="transConv8", filters=64,
**trans_params)(conv7)
up8 = K.layers.concatenate([up, conv2], axis=concat_axis)
conv8 = K.layers.Conv2D(name="conv8a", filters=64, **params)(up8)
conv8 = K.layers.Conv2D(name="conv8b", filters=64, **params)(conv8)
if FLAGS.use_upsampling:
up = K.layers.UpSampling2D(name="up9", size=(2, 2))(conv8)
else:
up = K.layers.Conv2DTranspose(name="transConv9", filters=32,
**trans_params)(conv8)
up9 = K.layers.concatenate([up, conv1], axis=concat_axis)
conv9 = K.layers.Conv2D(name="conv9a", filters=32, **params)(up9)
conv9 = K.layers.Conv2D(name="conv9b", filters=32, **params)(conv9)
predictionMask = K.layers.Conv2D(name="Mask", filters=n_cl_out,
kernel_size=(1, 1),
data_format=data_format,
activation="sigmoid")(conv9)
"""
Define the variables, losses, and metrics
We"ll return these as a dictionary called "model"
"""
model = {}
model["input"] = imgs
model["label"] = msks
model["output"] = predictionMask
model["loss"] = dice_coef_loss(msks, predictionMask)
model["metric_dice"] = dice_coef(msks, predictionMask)
model["metric_sensitivity"] = sensitivity(msks, predictionMask)
model["metric_specificity"] = specificity(msks, predictionMask)
model["metric_dice_test"] = dice_coef(msks, predictionMask)
model["loss_test"] = dice_coef_loss(msks, predictionMask)
model["metric_sensitivity_test"] = sensitivity(msks, predictionMask)
model["metric_specificity_test"] = specificity(msks, predictionMask)
"""
Summaries for TensorBoard
"""
tf.summary.scalar("loss", model["loss"])
tf.summary.histogram("loss", model["loss"])
tf.summary.scalar("dice", model["metric_dice"])
tf.summary.histogram("dice", model["metric_dice"])
tf.summary.scalar("sensitivity", model["metric_sensitivity"])
tf.summary.histogram("sensitivity", model["metric_sensitivity"])
tf.summary.scalar("specificity", model["metric_specificity"])
tf.summary.histogram("specificity", model["metric_specificity"])
tf.summary.image("predictions", predictionMask, max_outputs=3)
tf.summary.image("ground_truth", msks, max_outputs=3)
tf.summary.image("images", imgs, max_outputs=3)
summary_op = tf.summary.merge_all()
return model | 4d6bea9444935af1b95e9b209eee2df7d455e90c | 22,207 |
import re
import collections
def group_files(config_files, group_regex, group_alias="\\1"):
"""group input files by regular expression"""
rx = re.compile(group_regex)
for key, files in list(config_files.items()):
if isinstance(files, list):
groups = collections.defaultdict(list)
unmatched = []
for fn in sorted(files):
r = rx.search(fn)
if r is None:
unmatched.append(fn)
continue
group_name = r.expand(group_alias)
groups[group_name].append(fn)
if len(unmatched) == len(files):
pass
elif len(unmatched) == 0:
config_files[key] = [{x: y} for x, y in list(groups.items())]
else:
raise ValueError(
"input files not matching regular expression {}: {}"
.format(group_regex, str(unmatched)))
return config_files | 7f0c14387a9a63d03e8fdcb2297502a4ebf31e80 | 22,208 |
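An illustrative grouping with made-up paired-read file names; files sharing the captured run prefix end up in one group, and the config dict is updated in place and returned.
config = {"samples": ["run1_R1.fastq", "run1_R2.fastq",
                      "run2_R1.fastq", "run2_R2.fastq"]}
grouped = group_files(config, group_regex=r"(run\d+)_R\d\.fastq")
print(grouped)
# {'samples': [{'run1': ['run1_R1.fastq', 'run1_R2.fastq']},
#              {'run2': ['run2_R1.fastq', 'run2_R2.fastq']}]}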
def get_current_icmp_seq():
"""See help(scapy.arch.windows.native) for more information.
Returns the current ICMP seq number."""
return GetIcmpStatistics()['stats']['icmpOutStats']['dwEchos'] | 4e5798a6187cd8da55b54698deab4f00aec19144 | 22,209 |
def text_mocked_request(data: str, **kwargs) -> web.Request:
"""For testng purposes."""
return mocked_request(data.encode(), content_type="text/plain", **kwargs) | fe9acfd2d7801a387f6497bdd72becd94da57ea9 | 22,210 |
def get_imu_data():
"""Returns a 2d array containing the following
* ``senses[0] = accel[x, y, z]`` for accelerometer data
* ``senses[1] = gyro[x, y, z]`` for gyroscope data
* ``senses[2] = mag[x, y, z]`` for magnetometer data
.. note:: Not all data may be aggregated depending on the IMU device connected to the robot.
"""
senses = [
[100, 50, 25],
[-100, -50, -25],
[100, -50, 25]
]
for imu in IMUs:
if isinstance(imu, LSM9DS1_I2C):
senses[0] = list(imu.acceleration)
senses[1] = list(imu.gyro)
senses[2] = list(imu.magnetic)
elif isinstance(imu, MPU6050):
senses[0] = list(imu.acceleration)
            senses[1] = list(imu.gyro)
return senses | 24f24316a051a4ac9f1d8d7cbab00be05ff11c25 | 22,211 |
def parse_proc_diskstats(proc_diskstats_contents):
# type: (six.text_type) -> List[Sample]
"""
    Parse /proc/diskstats contents into a list of samples.
"""
return_me = [] # type: List[Sample]
for line in proc_diskstats_contents.splitlines():
match = PROC_DISKSTATS_RE.match(line)
if not match:
continue
name = match.group(1)
read_sectors = int(match.group(2))
write_sectors = int(match.group(3))
if read_sectors == 0 and write_sectors == 0:
continue
# Multiply by 512 to get bytes from sectors:
# https://stackoverflow.com/a/38136179/473672
return_me.append(Sample(name + " read", read_sectors * 512))
return_me.append(Sample(name + " write", write_sectors * 512))
return return_me | af24bc01d7e31dc43cf07057fae672ba62b20e53 | 22,212 |
def normalize(x):
"""Normalize a vector or a set of vectors.
Arguments:
* x: a 1D array (vector) or a 2D array, where each row is a vector.
Returns:
* y: normalized copies of the original vector(s).
"""
if x.ndim == 1:
return x / np.sqrt(np.sum(x ** 2))
elif x.ndim == 2:
return x / np.sqrt(np.sum(x ** 2, axis=1)).reshape((-1, 1)) | f4e813b22a9088c3a9a209e94963b33c24fab88e | 22,213 |
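Two quick calls, assuming `np` is numpy as the function requires: a single vector and a stack of row vectors.
import numpy as np

print(normalize(np.array([3.0, 4.0])))                # [0.6 0.8]
print(normalize(np.array([[3.0, 4.0], [0.0, 2.0]])))  # each row scaled to unit length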
def compute_perrakis_estimate(marginal_sample, lnlikefunc, lnpriorfunc,
lnlikeargs=(), lnpriorargs=(),
densityestimation='histogram', **kwargs):
"""
Computes the Perrakis estimate of the bayesian evidence.
The estimation is based on n marginal posterior samples
(indexed by s, with s = 0, ..., n-1).
:param array marginal_sample:
A sample from the parameter marginal posterior distribution.
Dimensions are (n x k), where k is the number of parameters.
:param callable lnlikefunc:
Function to compute ln(likelihood) on the marginal samples.
:param callable lnpriorfunc:
Function to compute ln(prior density) on the marginal samples.
:param tuple lnlikeargs:
Extra arguments passed to the likelihood function.
:param tuple lnpriorargs:
Extra arguments passed to the lnprior function.
:param str densityestimation:
The method used to estimate the marginal posterior density of each
model parameter ("normal", "kde", or "histogram").
Other parameters
----------------
:param kwargs:
Additional arguments passed to estimate_density function.
:return:
References
----------
Perrakis et al. (2014; arXiv:1311.0674)
"""
if not isinstance(marginal_sample, np.ndarray):
marginal_sample = np.array(marginal_sample)
number_parameters = marginal_sample.shape[1]
##
# Estimate marginal posterior density for each parameter.
log_marginal_posterior_density = np.zeros(marginal_sample.shape)
for parameter_index in range(number_parameters):
# Extract samples for this parameter.
x = marginal_sample[:, parameter_index]
# Estimate density with method "densityestimation".
log_marginal_posterior_density[:, parameter_index] = \
estimate_logdensity(x, method=densityestimation, **kwargs)
# Compute produt of marginal posterior densities for all parameters
log_marginal_densities = log_marginal_posterior_density.sum(axis=1)
##
# Compute log likelihood in marginal sample.
log_likelihood = lnlikefunc(marginal_sample, *lnlikeargs)
# Compute weights (i.e. prior over marginal density)
w = weight(marginal_sample, lnpriorfunc, lnpriorargs,
log_marginal_densities)
# Mask values with zero likelihood (a problem in lnlike)
cond = log_likelihood != 0
# Use identity for summation
# http://en.wikipedia.org/wiki/List_of_logarithmic_identities#Summation.2Fsubtraction
# ln(sum(x)) = ln(x[0]) + ln(1 + sum( exp( ln(x[1:]) - ln(x[0]) ) ) )
# log_summands = log_likelihood[cond] + np.log(prior_probability[cond])
# - np.log(marginal_densities[cond])
perr = lib.log_sum(w[cond] + log_likelihood[cond]) - log(len(w[cond]))
return perr | 70a287e3ed8391ecef1e48d7db846593fe240823 | 22,214 |
def postNewProfile(profile : Profile):
"""Gets all profile details of user with given profile_email
Parameters:
str: profile_email
Returns:
Json with Profile details """
profile_email = profile.email
profile_query = collection.find({"email":profile_email})
profile_query = [item for item in profile_query]
if not profile_query :
collection.save(dict(profile))
return True
return False | be81eac071e89a9ff8d44ac8e2cd479e911763b6 | 22,215 |
from typing import List
def get_templates() -> List[dict]:
"""
Gets a list of Templates that the active client can access
"""
client = get_active_notification_client()
if not client:
raise NotificationClientNotFound()
r = _get_templates(client=client)
return r | b7603ba33e1628eb6dad91b861bf17ecb914c1eb | 22,217 |
def svn_mergeinfo_intersect2(*args):
"""
svn_mergeinfo_intersect2(svn_mergeinfo_t mergeinfo1, svn_mergeinfo_t mergeinfo2,
svn_boolean_t consider_inheritance, apr_pool_t result_pool,
apr_pool_t scratch_pool) -> svn_error_t
"""
return _core.svn_mergeinfo_intersect2(*args) | 5c8176eec56fb1a95b306af41c2d98caa75459ec | 22,218 |
def conv_backward(dZ, A_prev, W, b, padding="same", stride=(1, 1)):
"""
Performs back propagation over a convolutional layer of a neural network
dZ is a numpy.ndarray of shape (m, h_new, w_new, c_new) containing the
partial derivatives with respect to the unactivated output of the
convolutional layer
m is the number of examples
h_new is the height of the output
w_new is the width of the output
c_new is the number of channels in the output
A_prev is a numpy.ndarray of shape (m, h_prev, w_prev, c_prev) containing
the output of the previous layer
m is the number of examples
h_prev is the height of the previous layer
w_prev is the width of the previous layer
c_prev is the number of channels in the previous layer
W is a numpy.ndarray of shape (kh, kw, c_prev, c_new) containing the
kernels for the convolution
kh is the filter height
kw is the filter width
c_prev is the number of channels in the previous layer
c_new is the number of channels in the output
b is a numpy.ndarray of shape (1, 1, 1, c_new) containing the biases
applied to the convolution
padding is a string that is either same or valid, indicating the type of
padding used
stride is a tuple of (sh, sw) containing the strides for the convolution
sh is the stride for the height
sw is the stride for the width
Returns: the partial derivatives with respect to the previous layer
(dA_prev), the kernels (dW), and the biases (db), respectively
"""
sh, sw = stride
kh, kw, c, c_new = W.shape
m, h_prev, w_prev, c_prev = A_prev.shape
d, h_new, w_new, _ = dZ.shape
if padding == 'same':
padw = int((((w_prev - 1) * sw + kw - w_prev) / 2) + 1)
padh = int((((h_prev - 1) * sh + kh - h_prev) / 2) + 1)
else:
padh, padw = (0, 0)
A_prev = np.pad(A_prev, ((0,), (padh,), (padw,), (0,)), constant_values=0,
mode='constant')
dW = np.zeros(W.shape)
dA = np.zeros(A_prev.shape)
db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)
for i in range(m):
for j in range(h_new):
for k in range(w_new):
jsh = j * sh
ksw = k * sw
for ll in range(c_new):
dW[:, :, :, ll] += A_prev[i, jsh: jsh + kh,
ksw: ksw + kw, :] * \
dZ[i, j, k, ll]
dA[i, jsh: jsh + kh, ksw: ksw + kw, :] += \
dZ[i, j, k, ll] * W[:, :, :, ll]
if padding == 'same':
dA = dA[:, padh: -padh, padw: -padw, :]
return dA, dW, db | d55eab80411efa903e03b584464ff50196468d7d | 22,220 |
import collections
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text | 5ca989a4ae5ce00cd9c09c4d9480dbeb935d6ca8 | 22,222 |
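A minimal usage sketch (illustration only): it assumes `BasicTokenizer` is the BERT basic tokenizer defined elsewhere in this module, and mirrors the example from the comments above.
# get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
# -> "Steve Smith"   (the alignment heuristic drops the trailing "'s")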
def createContext(data, id=None, keyTransform=None, removeNull=False):
"""Receives a dict with flattened key values, and converts them into nested dicts
:type data: ``dict`` or ``list``
:param data: The data to be added to the context (required)
:type id: ``str``
:keyword id: The ID of the context entry
:type keyTransform: ``function``
:keyword keyTransform: A formatting function for the markdown table headers
:type removeNull: ``bool``
:keyword removeNull: True if empty columns should be removed, false otherwise
:return: The converted context list
:rtype: ``list``
"""
if isinstance(data, (list, tuple)):
return [createContextSingle(d, id, keyTransform, removeNull) for d in data]
else:
return createContextSingle(data, id, keyTransform, removeNull) | a97c599689932cbfea7063fdae32702d413352ac | 22,223 |
import math
def haversine(phi1, lambda1, phi2, lambda2):
"""
calculate angular great circle distance with haversine formula
see parameters in spherical_law_of_cosines
"""
d_phi = phi2 - phi1
d_lambda = lambda2 - lambda1
a = math.pow(math.sin(d_phi / 2), 2) + \
math.cos(phi1) * math.cos(phi2) * math.pow(math.sin(d_lambda / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
return c | acb25fc8d305dde7b18059a770bdcd9b135b295a | 22,225 |
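A quick worked example (a sketch, not part of the original module): the inputs must already be in radians, and multiplying the returned central angle by a mean Earth radius of 6371 km gives the familiar great-circle distance.
import math
phi1, lam1 = math.radians(48.8566), math.radians(2.3522)    # Paris
phi2, lam2 = math.radians(51.5074), math.radians(-0.1278)   # London
angle = haversine(phi1, lam1, phi2, lam2)                    # central angle in radians
print(angle * 6371.0)                                        # ~344 km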
import yaml
from datetime import datetime
from rosbag import Bag
def get_backup_start_timestamp(bag_name):
"""
    Input: First bag name
    Output: datetime object
"""
    info_dict = yaml.safe_load(Bag(bag_name, 'r')._get_yaml_info())
start_timestamp = info_dict.get("start", None)
start_datetime = None
if start_timestamp is None:
print("No start time info in bag, try to retrieve the start time by parsing bag name.")
start_datetime = parse_backup_start_timestamp(bag_name)
else:
        start_datetime = datetime.fromtimestamp(start_timestamp)
# print("info_dict = \n%s" % str(info_dict))
# print('type(info_dict["start"]) = %s' % type(info_dict["start"]))
# print(info_dict["start"])
return start_datetime | b8ee55c3028fb6f6e1137d614e836724c3e00bd6 | 22,226 |
import numpy as np
def get_mask_areas(masks: np.ndarray) -> np.ndarray:
"""Get mask areas from the compressed mask map."""
# 0 for background
ann_ids = np.sort(np.unique(masks))[1:]
areas = np.zeros((len(ann_ids)))
for i, ann_id in enumerate(ann_ids):
areas[i] = np.count_nonzero(ann_id == masks)
return areas | bf584c9529118d9946e461b4df22cf64efbeb251 | 22,228 |
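A small self-contained check (assumes only numpy); label 0 is background, and labels 1 and 2 each cover three pixels:
import numpy as np
masks = np.array([[0, 1, 1],
                  [0, 2, 1],
                  [2, 2, 0]])
print(get_mask_areas(masks))   # [3. 3.]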
def cursor_from_image(image):
"""
Take a valid cursor image and create a mouse cursor.
"""
colors = {(0,0,0,255) : "X",
(255,255,255,255) : "."}
rect = image.get_rect()
icon_string = []
for j in range(rect.height):
this_row = []
for i in range(rect.width):
pixel = tuple(image.get_at((i,j)))
this_row.append(colors.get(pixel, " "))
icon_string.append("".join(this_row))
return icon_string | 173c3fc6bfcc6bb45c9e1e6072d7c68244750da9 | 22,229 |
import numpy as np
from numpy import linalg as la
from numpy.linalg import multi_dot
def h_matrix(jac, p, lamb, method='kotre', W=None):
"""
JAC method of dynamic EIT solver:
H = (J.T*J + lamb*R)^(-1) * J.T
Parameters
----------
jac: NDArray
Jacobian
p, lamb: float
regularization parameters
    method: str, optional
        regularization method, one of {'kotre', 'lm', 'dgn'}
    W: NDArray, optional
        measurement weighting matrix; identity weighting is assumed when omitted
Returns
-------
H: NDArray
pseudo-inverse matrix of JAC
"""
if W is None:
j_w_j = np.dot(jac.transpose(), jac)
else:
j_w_j = multi_dot([jac.transpose(), W, jac])
if method == 'kotre':
# see adler-dai-lionheart-2007
# p=0 : noise distribute on the boundary ('dgn')
# p=0.5 : noise distribute on the middle
# p=1 : noise distribute on the center ('lm')
r_mat = np.diag(np.diag(j_w_j)) ** p
elif method == 'lm':
# Marquardt–Levenberg, 'lm' for short
# or can be called NOSER, DLS
r_mat = np.diag(np.diag(j_w_j))
else:
# Damped Gauss Newton, 'dgn' for short
r_mat = np.eye(jac.shape[1])
# build H
h_mat = np.dot(la.inv(j_w_j + lamb * r_mat), jac.transpose())
return h_mat | fc4d225bb2d98ee067b03c10f14ad23db6fad1a9 | 22,230 |
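A minimal sketch with a random Jacobian (assumes numpy; the shapes are illustrative, not taken from the original project):
import numpy as np
np.random.seed(0)
jac = np.random.rand(64, 128)                        # (n_measurements, n_elements)
H = h_matrix(jac, p=0.5, lamb=0.01, method='kotre')
print(H.shape)                                        # (128, 64): maps measurements back to elements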
def _get_flavors_metadata_ui_converters_from_configuration():
"""Get flavor metadata ui converters from flavor mapping config dir."""
flavors_metadata_ui_converters = {}
configs = util.load_configs(setting.FLAVOR_MAPPING_DIR)
for config in configs:
adapter_name = config['ADAPTER']
flavor_name = config['FLAVOR']
flavors_metadata_ui_converters.setdefault(
adapter_name, {}
)[flavor_name] = config.get('CONFIG_MAPPING', {})
adapters = adapter_api.ADAPTERS
parents = {}
for adapter_name, adapter in adapters.items():
parent = adapter.get('parent', None)
parents[adapter_name] = parent
for adapter_name, adapter in adapters.items():
flavors_metadata_ui_converters[adapter_name] = (
util.recursive_merge_dict(
adapter_name, flavors_metadata_ui_converters, parents
)
)
return flavors_metadata_ui_converters | 4cb8dc1737579cd76dd696ec08f984015e3ef77b | 22,231 |
def outermost_scope_from_subgraph(graph, subgraph, scope_dict=None):
"""
Returns the outermost scope of a subgraph.
If the subgraph is not connected, there might be several
scopes that are locally outermost. In this case, it
    raises a TypeError.
"""
if scope_dict is None:
scope_dict = graph.scope_dict()
scopes = set()
for element in subgraph:
scopes.add(scope_dict[element])
# usual case: Root of scope tree is in subgraph,
# return None (toplevel scope)
if None in scopes:
return None
toplevel_candidates = set()
for scope in scopes:
# search the one whose parent is not in scopes
# that must be the top level one
current_scope = scope_dict[scope]
while current_scope and current_scope not in scopes:
current_scope = scope_dict[current_scope]
if current_scope is None:
toplevel_candidates.add(scope)
if len(toplevel_candidates) != 1:
raise TypeError("There are several locally top-level nodes. "
"Please check your subgraph and see to it "
"being connected.")
else:
return toplevel_candidates.pop() | 0bd649d00b745065e75e2dcfc37d9b16eaa0c3db | 22,232 |
import numpy as np
from scipy import integrate
def calc_entropy_ew(molecule, temp):
"""
    Exponential well entropy
    :param molecule: molecule object providing mass, ew_a_inv_ang and ew_k_kcal
    :param temp: temperature in K
    :return: exponential-well entropy contribution
"""
mass = molecule.mass / Constants.amu_to_kg * Constants.amu_to_au
a = molecule.ew_a_inv_ang * Constants.inverse_ang_inverse_au
k = molecule.ew_k_kcal * Constants.kcal_mol_to_au
q_t = _q_t_ew(molecule, temp)
beta = 1.0 / (Constants.kb_au * temp)
cap_lambda = ((2.0 * mass * np.pi) / (beta * Constants.h_au ** 2)) ** 1.5
def integrand(r, beta, a, b):
return r ** 2 * np.exp(-beta * a * (np.exp(b * r) - 1.0) + b * r)
integral = integrate.quad(integrand, 0.0, 10.0, args=(beta, k, a))[0]
term_4 = 4.0 * np.pi * (k * beta * cap_lambda / q_t) * integral
return Constants.r * (1.5 - k * beta + np.log(q_t) + term_4) | 7d4d2c13ce5b081e4b209169a3cf996dd0b34a44 | 22,233 |
import numpy as np
def csm(A, B):
"""
Calculate Cosine similarity measure of distance between two vectors `A` and `B`.
Parameters
-----------
A : ndarray
First vector containing values
B : ndarray
Second vector containing values
Returns
--------
float
distance value between two vectors
Examples
---------
>>> distance = csm(A, B)
"""
numerator = np.sum(A * B)
    denominator = np.sqrt(np.sum(A ** 2)) * np.sqrt(np.sum(B ** 2))
if denominator == 0:
denominator = 1
return numerator / denominator | cebca4a53ed3200d4820041fca7886df57f4a40c | 22,235 |
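A quick check of the cosine formula (assumes numpy): two parallel vectors should score 1.
import numpy as np
A = np.array([1.0, 2.0, 3.0])
B = np.array([2.0, 4.0, 6.0])
print(csm(A, B))   # ~1.0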
import random
import string
def rand_email():
"""Random email.
Usage Example::
>>> rand_email()
[email protected]
"""
name = random.choice(string.ascii_letters) + \
rand_str(string.ascii_letters + string.digits, random.randint(4, 14))
domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
kind = random.choice(_all_email_kinds)
return "%s@%s%s" % (name, domain, kind) | 9898669f59511d5b8fd403de0ab7174e7710d898 | 22,237 |
import asyncio
async def value_to_deep_structure(value, hash_pattern):
"""build deep structure from value"""
try:
objects = {}
deep_structure0 = _value_to_objects(
value, hash_pattern, objects
)
except (TypeError, ValueError):
raise DeepStructureError(hash_pattern, value) from None
obj_id_to_checksum = {}
new_checksums = set()
async def conv_obj_id_to_checksum(obj_id):
obj = objects[obj_id]
obj_buffer = await serialize(obj, "mixed")
obj_checksum = await calculate_checksum(obj_buffer)
new_checksums.add(obj_checksum.hex())
buffer_cache.cache_buffer(obj_checksum, obj_buffer)
obj_id_to_checksum[obj_id] = obj_checksum.hex()
coros = []
for obj_id in objects:
coro = conv_obj_id_to_checksum(obj_id)
coros.append(coro)
await asyncio.gather(*coros)
deep_structure = _build_deep_structure(
hash_pattern, deep_structure0, obj_id_to_checksum
)
return deep_structure, new_checksums | 05df4e4cec2a39006631f96a84cd6268a6550b68 | 22,239 |
import pandas as pd
def get_users_run(jobs, d_from, target, d_to='', use_unit='cpu',
serialize_running=''):
"""Takes a DataFrame full of job information and
returns usage for each "user"
uniquely based on specified unit.
This function operates as a stepping stone for plotting usage figures
and returns various series and frames for several different uses.
Parameters
-------
jobs: DataFrame
Job DataFrame typically generated by slurm/sacct_jobs
or the ccmnt package.
use_unit: str, optional
Usage unit to examine. One of: {'cpu', 'cpu-eqv', 'gpu', 'gpu-eqv'}.
Defaults to 'cpu'.
d_from: date str
Beginning of the query period, e.g. '2019-04-01T00:00:00'.
target: int-like
        Typically a CPU allocation or core-equivalent value for a particular account.
Often 50.
d_to: date str, optional
End of the query period, e.g. '2020-01-01T00:00:00'.
Defaults to now if empty.
serialize_running: str, optional
Pickle given structure with argument as a name.
If left empty, pickle procedure is skipped.
Defaults to empty.
Returns
-------
user_running_cat:
Frame of running resources for each of the unique
"users" in the jobs data frame.
"""
users = jobs.user.unique()
user_count = 0
for user in users:
user_mask = jobs['user'].str.match(user)
user_jobs = jobs[user_mask].copy()
_, user_queued, user_running, _ = job_use(user_jobs, d_from,
target, d_to=d_to,
use_unit=use_unit)
user_queued=user_queued[d_from:d_to]
user_running=user_running[d_from:d_to]
if user_count == 0:
user_running_cat = pd.Series(user_running,
index=user_running.index,
name=user)
else:
user_running_ser = pd.Series(user_running,
index=user_running.index,
name=user)
user_running_cat = pd.concat([user_running_cat, user_running_ser],
axis=1)
user_count = user_count + 1
if user_count == 1:
user_running_cat = user_running_cat.to_frame()
if serialize_running != '':
user_running_cat.to_pickle(serialize_running)
return user_running_cat | ab40605468e40b7e76a35d4bc2c1344be9050d5f | 22,240 |
import collections
def get_classes_constants(paths):
"""
Extract the vtk class names and constants from the path.
:param paths: The path(s) to the Python file(s).
:return: The file name, the VTK classes and any VTK constants.
"""
res = collections.defaultdict(set)
for path in paths:
content = path.read_text().split('\n')
for line in content:
            if any(pattern.search(line) for pattern in Patterns.skip_patterns):
                continue
for pattern in Patterns.vtk_patterns:
m = pattern.search(line)
if m:
for g in m.groups():
res[str(path)].add(g)
return res | e58531e99c37c1c23abb46b18f4b2af0b95c5db9 | 22,241 |
import numpy as np
def predict_unfolding_at_temperature(temp, data, PDB_files):
"""
    Function to predict labels for all trajectories at a given temperature
    Note: The assumption is that at a given temperature, all snapshots are at the same times
    Filter should be 'First commit' or 'Last commit' or 'Filter osc' as described in ClusterPCA
    You can also enter None (or anything else besides the options above) in which case no filtering is applied
"""
temp=str(temp)
if len(temp)==1:
temp='{}.'.format(temp)
while len(temp)<5: #add zeros so that the temperature is of the form 0.80
temp='{}0'.format(temp)
f, trajectories = utils.get_trajectory(data, PDB_files, '{}_'.format(temp) )
#need to figure out how long are all the trajectories.
#to figure this out, iterate through the first files until you see a change
go=True
i=0
traj_nums=[]
while go:
file=f[i]
file=file.split('{}_'.format(temp))
suffix=file[1]
traj_num=suffix.split('.')[0]
traj_nums.append(traj_num)
if traj_nums[i]!=traj_nums[i-1]:
go=False
else:
i+=1
traj_len=i
n_trajectories=int(len(f)/traj_len)
sim_labels=np.zeros((n_trajectories, traj_len))
times=utils.get_times(f[0:traj_len])
for n in range(n_trajectories):
traj=trajectories[n*traj_len:n*traj_len+traj_len]
sim_labels[n,:]=traj
return times, sim_labels | 9fec4ee407bf41692c57899e96ff16ad2acdf4ea | 22,242 |
def _frac_scorer(matched_hs_ions_df, all_hyp_ions_df, N_spectra):
"""Fraction ion observed scorer.
Provides a score based off of the fraction of hypothetical ions that were observed
for a given hypothetical structure.
Parameters
----------
matched_hs_ions_df : pd.DataFrame
Dataframe of observed ions that matched a specific hypothetical structure
all_hyp_ions_df : pd.DataFrame
Dataframe of all possible ions for a given hypothetical structure.
N_spectra : int
Number of spectra provided.
Returns
-------
float
Score for a given hypothetical structure.
"""
# Calculate the number of matched ions observed and total possible
N_matched_hs_ions = matched_hs_ions_df.shape[0]
N_tot_hyp_ions = all_hyp_ions_df.shape[0]
score = N_matched_hs_ions / (N_tot_hyp_ions*N_spectra)
return score | a341b02b7ba64eb3b29032b4fe681267c5d36a00 | 22,243 |
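A toy invocation (assumes pandas); only the row counts matter, so the column name here is arbitrary:
import pandas as pd
matched = pd.DataFrame({"mz": [101.1, 202.2, 303.3]})           # 3 matched ions
all_hyp = pd.DataFrame({"mz": [100.0 + i for i in range(10)]})  # 10 hypothetical ions
print(_frac_scorer(matched, all_hyp, N_spectra=2))              # 3 / (10 * 2) = 0.15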
def role_in(roles_allowed):
"""
A permission checker that checks that a role possessed by the user matches one of the role_in list
"""
def _check_with_authuser(authuser):
return any(r in authuser.roles for r in roles_allowed)
return _check_with_authuser | 24ff0423dc50187f3607329342af6c8930596a36 | 22,244 |
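A tiny sketch with a stand-in auth-user object (any object exposing a `roles` attribute works):
from collections import namedtuple
AuthUser = namedtuple("AuthUser", ["roles"])
checker = role_in(["admin", "editor"])
print(checker(AuthUser(roles=["viewer", "editor"])))   # True
print(checker(AuthUser(roles=["viewer"])))             # False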
from typing import List
def elements_for_model(model: Model) -> List[str]:
"""Creates a list of elements to expect to register.
Args:
model: The model to create a list for.
"""
def increment(index: List[int], dims: List[int]) -> None:
# assumes index and dims are the same length > 0
# modifies index argument
i = len(index) - 1
index[i] += 1
while index[i] == dims[i]:
index[i] = 0
i -= 1
if i == -1:
break
index[i] += 1
def index_to_str(index: List[int]) -> str:
result = ''
for i in index:
result += '[{}]'.format(i)
return result
def generate_indices(multiplicity: List[int]) -> List[str]:
# n-dimensional counter
indices = list() # type: List[str]
index = [0] * len(multiplicity)
indices.append(index_to_str(index))
increment(index, multiplicity)
while sum(index) > 0:
indices.append(index_to_str(index))
increment(index, multiplicity)
return indices
result = list() # type: List[str]
for element in model.compute_elements:
if len(element.multiplicity) == 0:
result.append(str(element.name))
else:
for index in generate_indices(element.multiplicity):
result.append(str(element.name) + index)
return result | a0769d762fc31ac128ad077e1601b3ba3bcd6a27 | 22,245 |
def form_IntegerNoneDefault(request):
"""
An integer field defaulting to None
"""
schema = schemaish.Structure()
schema.add('myIntegerField', schemaish.Integer())
form = formish.Form(schema, 'form')
form.defaults = {'myIntegerField':None}
return form | 322671035e232cfd99c7500fe0995d652a4fbe7a | 22,246 |
import string
from nltk.tokenize import word_tokenize
def tokenize(text, stopwords):
"""Tokenizes and removes stopwords from the document"""
without_punctuations = text.translate(str.maketrans('', '', string.punctuation))
tokens = word_tokenize(without_punctuations)
    filtered = [w.lower() for w in tokens if w.lower() not in stopwords]
return filtered | 7a231d124e89c97b53779fee00874fb2cb40155e | 22,247 |
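A minimal example (requires NLTK and its 'punkt' tokenizer data); with the case-insensitive stop-word check above, "The" is dropped:
stopwords = {"the", "a", "is"}
print(tokenize("The quick brown fox!", stopwords))   # ['quick', 'brown', 'fox']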
def to_dict(prim: Primitive) -> ObjectData:
"""Convert a primitive to a dictionary for serialization."""
val: BasePrimitive = prim.value
data: ObjectData = {
"name": val.name,
"size": val.size,
"signed": val.signed,
"integer": prim in INTEGER_PRIMITIVES,
}
if val.min != 0 or val.max != 0:
data["min"] = val.min
data["max"] = val.max
return data | 32d57b89e6740239b55b7f491e16de7f9b31a186 | 22,248 |
import requests
def get_raw_img(url):
"""
Download input image from url.
"""
pic = False
response = requests.get(url, stream=True)
with open('./imgs/img.png', 'wb') as file:
for chunk in response.iter_content():
file.write(chunk)
pic = True
response.close()
return pic | 67b2cf9f2c89c26fca865ea93be8f6e32cfa2de5 | 22,249 |
def get_and_validate_study_id(chunked_download=False):
"""
Checks for a valid study object id or primary key.
If neither is given, a 400 (bad request) error is raised.
Study object id malformed (not 24 characters) causes 400 error.
Study object id otherwise invalid causes 400 error.
Study does not exist in our database causes 404 error.
"""
study = _get_study_or_abort_404(request.values.get('study_id', None),
request.values.get('study_pk', None))
if not study.is_test and chunked_download:
# You're only allowed to download chunked data from test studies
return abort(404)
else:
return study | 405420481c343afcaacbcfc14bc75fc7acf5aae9 | 22,250 |
import re
def tokenize_char(pinyin: str) -> tuple[str, str, int] | None:
"""
Given a string containing the pinyin representation of a Chinese character, return a 3-tuple containing its
initial (``str``), final (``str``), and tone (``int; [0-4]``), or ``None`` if it cannot be properly tokenized.
"""
initial = final = ''
tone = 0
for i in pinyin:
if i in __TONED_VOWELS:
tone = __TONED_VOWELS[i][1]
pinyin = pinyin.replace(i, __TONED_VOWELS[i][0])
break
for f in __FINALS:
if (s := re.search(f, pinyin)) is not None:
final = s[0]
initial = re.sub(f, '', pinyin)
break
return (initial, final, tone) if final else None | e4bfb4712857d9201daff187ab63c9846be17764 | 22,251 |
def is_in_cell(point:list, corners:list) -> bool:
"""
Checks if a point is within a cell.
:param point: Tuple of lat/Y,lon/X-coordinates
:param corners: List of corner coordinates
:returns: Boolean whether point is within cell
:Example:
"""
y1, y2, x1, x2 = corners[2][0], corners[0][0], corners[0][1], corners[2][1]
if (y1 <= point[0] <= y2) and (x1 <= point[1] <= x2):
return True
return False | 5f8f13a65ea4da1909a6b701a04e391ebed413dc | 22,252 |
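A quick sketch of the expected corner layout, inferred from the indices used above (corners[0] holds (y_max, x_min) and corners[2] holds (y_min, x_max)):
corners = [(50.0, 10.0), (50.0, 12.0), (48.0, 12.0), (48.0, 10.0)]
print(is_in_cell([49.0, 11.0], corners))   # True
print(is_in_cell([51.0, 11.0], corners))   # False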
def json_response(function):
"""
This decorator can be used to catch :class:`~django.http.Http404` exceptions and convert them to a :class:`~django.http.JsonResponse`.
Without this decorator, the exceptions would be converted to :class:`~django.http.HttpResponse`.
:param function: The view function which should always return JSON
:type function: ~collections.abc.Callable
:return: The decorated function
:rtype: ~collections.abc.Callable
"""
@wraps(function)
def wrap(request, *args, **kwargs):
r"""
The inner function for this decorator.
It tries to execute the decorated view function and returns the unaltered result with the exception of a
:class:`~django.http.Http404` error, which is converted into JSON format.
:param request: Django request
:type request: ~django.http.HttpRequest
:param \*args: The supplied arguments
:type \*args: list
:param \**kwargs: The supplied kwargs
:type \**kwargs: dict
:return: The response of the given function or an 404 :class:`~django.http.JsonResponse`
:rtype: ~django.http.JsonResponse
"""
try:
return function(request, *args, **kwargs)
except Http404 as e:
return JsonResponse({"error": str(e) or "Not found."}, status=404)
return wrap | 0b13ff38d932c64fd5afbb017601e34c1c26648b | 22,253 |
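A hypothetical Django view using the decorator (a sketch; assumes a configured Django project, and the view and its data are made up for illustration):
from django.http import Http404, JsonResponse

@json_response
def item_detail(request, item_id):
    # an unknown id raises Http404, which the decorator converts to a JSON 404
    items = {1: "first", 2: "second"}
    if item_id not in items:
        raise Http404(f"Item {item_id} not found.")
    return JsonResponse({"id": item_id, "name": items[item_id]})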
import re
from junit_xml import TestCase, TestSuite
def generate_junit_report_from_cfn_guard(report):
"""Generate Test Case from cloudformation guard report"""
test_cases = []
count_id = 0
for file_findings in report:
finding = file_findings["message"]
        # extract the resource id from the finding line
        resource_regex = re.search(r"^\[([^]]*)]", finding)
if resource_regex:
resource_id = resource_regex.group(1)
test_case = TestCase(
"%i - %s" % (count_id, finding),
classname=resource_id)
test_case.add_failure_info(output="%s#R:%s" % (file_findings["file"], resource_id))
test_cases.append(test_case)
count_id += 1
test_suite = TestSuite("aws cfn-guard test suite", test_cases)
return TestSuite.to_xml_string([test_suite], prettyprint=False) | cdf747c535042bf93c204fe8d2b647b3045f7ed7 | 22,254 |
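A hedged sketch of the expected input: a list of cfn-guard findings whose messages start with the resource id in square brackets (the finding text is invented for illustration):
report = [{"file": "template.yaml",
           "message": "[MyBucket] failed because [BucketEncryption] is missing"}]
xml_string = generate_junit_report_from_cfn_guard(report)   # JUnit XML with one failing test case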
def new_custom_alias():
"""
Create a new custom alias
Input:
alias_prefix, for ex "www_groupon_com"
alias_suffix, either [email protected] or @my-domain.com
optional "hostname" in args
Output:
201 if success
409 if the alias already exists
"""
user = g.user
if not user.can_create_new_alias():
LOG.d("user %s cannot create any custom alias", user)
return (
jsonify(
error="You have reached the limitation of a free account with the maximum of "
f"{MAX_NB_EMAIL_FREE_PLAN} aliases, please upgrade your plan to create more aliases"
),
400,
)
user_custom_domains = [cd.domain for cd in user.verified_custom_domains()]
hostname = request.args.get("hostname")
data = request.get_json()
if not data:
return jsonify(error="request body cannot be empty"), 400
alias_prefix = data.get("alias_prefix", "").strip()
alias_suffix = data.get("alias_suffix", "").strip()
alias_prefix = convert_to_id(alias_prefix)
if not verify_prefix_suffix(user, alias_prefix, alias_suffix, user_custom_domains):
return jsonify(error="wrong alias prefix or suffix"), 400
full_alias = alias_prefix + alias_suffix
if GenEmail.get_by(email=full_alias):
LOG.d("full alias already used %s", full_alias)
return jsonify(error=f"alias {full_alias} already exists"), 409
gen_email = GenEmail.create(user_id=user.id, email=full_alias)
db.session.commit()
if hostname:
AliasUsedOn.create(gen_email_id=gen_email.id, hostname=hostname)
db.session.commit()
return jsonify(alias=full_alias), 201 | 552812711eefd182d7671e3ac72776bbf908ff33 | 22,255 |
def seq(seq_aps):
"""Sequence of parsers `seq_aps`."""
if not seq_aps:
return succeed(list())
else:
ap = seq_aps[0]
aps = seq_aps[1:]
return ap << cons >> seq(aps) | ab94d3372f229e13a83387b256f3daa3ab2357a5 | 22,257 |
import numpy as np
from scipy import integrate
def Growth_factor_Heath(omega_m, z):
"""
Computes the unnormalised growth factor at redshift z given the present day value of omega_m. Uses the expression
from Heath1977
Assumes Flat LCDM cosmology, which is fine given this is also assumed in CambGenerator. Possible improvement
could be to tabulate this using the CambGenerator so that it would be self consistent for non-LCDM cosmologies.
:param omega_m: the matter density at the present day
    :param z: the redshift we want the growth factor at
:return: the unnormalised growth factor at redshift z.
"""
avals = np.logspace(-4.0, np.log10(1.0 / (1.0 + z)), 10000)
integ = integrate.simps(1.0 / (avals * E_z(omega_m, 1.0 / avals - 1.0)) ** 3, avals, axis=0)
return 5.0 / 2.0 * omega_m * E_z(omega_m, z) * integ | c14e93a871f57c0566b13adb9005c54e68fbfa0f | 22,258 |
import numpy as np
def freq2bark(freq_axis):
""" Frequency conversion from Hertz to Bark
See E. Zwicker, H. Fastl: Psychoacoustics. Springer,Berlin, Heidelberg, 1990.
The coefficients are linearly interpolated from the values given in table 6.1.
Parameter
---------
freq_axis : numpy.array
Hertz frequencies to be converted
Output
------
bark_axis : numpy.array
frequencies converted in Bark
"""
xp = np.array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 510, 570, 630, 700, 770, 840, 920, 1000,
1080, 1170, 1270, 1370, 1480, 1600, 1720, 1850, 2000,
2150, 2320, 2500, 2700, 2900, 3150, 3400, 3700, 4000,
4400, 4800, 5300, 5800, 6400, 7000, 7700, 8500, 9500,
10500, 12000, 13500, 15500, 20000])
yp = np.arange(0,25,0.5)
return np.interp(freq_axis,xp,yp) | f6bd27c54debe8cd8b79099f106e1bf7d4350010 | 22,259 |
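A quick check (assumes numpy): tabulated frequencies are recovered exactly, e.g. 1000 Hz maps to 8.5 Bark and 3150 Hz to 16 Bark.
import numpy as np
print(freq2bark(np.array([1000.0, 3150.0])))   # [ 8.5 16. ]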
def close_connection(conn: Connection):
"""
Closes current connection.
:param conn Connection: Connection to close.
"""
if conn:
conn.close()
return True
return False | bca91687677860a7937875335701afb923ba49cc | 22,261 |
import warnings
def tile_memory_free(y, shape):
"""
XXX Will be deprecated
Tile vector along multiple dimension without allocating new memory.
Parameters
----------
y : np.array, shape (n,)
data
shape : np.array, shape (m),
Returns
-------
Y : np.array, shape (n, *shape)
"""
warnings.warn('Will be deprecated. Use np.newaxis instead')
for dim in range(len(shape)):
y = y[..., np.newaxis]
return y | f800c44ddd2a66553619157d8c8374a4c33dde18 | 22,262 |
def load_ref_system():
""" Returns d-talose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.6934 -0.4440 -0.1550
C -2.0590 0.1297 0.3312
C -3.1553 -0.9249 0.1673
O -0.9091 -0.8895 -1.4780
C 0.4226 0.6500 -0.0961
O -1.9403 0.6391 1.6411
O -3.6308 -1.5177 1.1069
C 1.7734 0.0930 -0.6280
O 0.6442 1.1070 1.2190
C 2.7961 1.2385 -0.8186
O 2.2979 -0.9417 0.1683
O 3.8858 0.8597 -1.6117
H -0.4009 -1.3143 0.4844
H -2.3349 1.0390 -0.2528
H -3.4909 -1.1261 -0.8615
H -0.0522 -1.1155 -1.8272
H 0.1195 1.5189 -0.7325
H -2.0322 -0.0862 2.2502
H 1.5977 -0.4374 -1.5988
H -0.2204 1.2523 1.6061
H 3.1423 1.6308 0.1581
H 2.3529 2.0761 -1.3846
H 2.4151 -0.5980 1.0463
H 4.2939 0.1096 -1.1961
""") | 7b41df916cb06dccaa53f13461f2bb7c6bfd882a | 22,263 |
def format_user_id(user_id):
"""
Format user id so Slack tags it
Args:
user_id (str): A slack user id
Returns:
str: A user id in a Slack tag
"""
return f"<@{user_id}>" | 2b3a66739c3c9c52c5beb7161e4380a78c5e2664 | 22,264 |
import socket
import ssl
from tempfile import NamedTemporaryFile
def test_module(params: dict):
"""
Returning 'ok' indicates that the integration works like it is supposed to.
This test works by running the listening server to see if it will run.
Args:
params (dict): The integration parameters
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
certificate = str(params.get('certificate'))
private_key = str(params.get('private_key'))
certificate_file = NamedTemporaryFile(mode='w', delete=False)
certificate_path = certificate_file.name
certificate_file.write(certificate)
certificate_file.close()
private_key_file = NamedTemporaryFile(mode='w', delete=False)
private_key_path = private_key_file.name
private_key_file.write(private_key)
private_key_file.close()
s = socket.socket()
ssl.wrap_socket(s, keyfile=private_key_path, certfile=certificate_path, server_side=True,
ssl_version=ssl.PROTOCOL_TLSv1_2)
return 'ok'
except ssl.SSLError as e:
        if e.reason == 'KEY_VALUES_MISMATCH':
            return 'Private and Public keys do not match'
        return f'Test failed with the following error: {repr(e)}'
except Exception as e:
return f'Test failed with the following error: {repr(e)}' | 0f49bff09fcb84fa810ee2c6d32a52089f2f0147 | 22,265 |
def class_loss_regr(num_classes, num_cam):
"""Loss function for rpn regression
Args:
num_anchors: number of anchors (9 in here)
num_cam : number of cam (3 in here)
Returns:
Smooth L1 loss function
0.5*x*x (if x_abs < 1)
x_abx - 0.5 (otherwise)
"""
def class_loss_regr_fixed_num(y_true, y_pred):
#x = y_true[:, :, 4*num_classes:] - y_pred
x = y_true[:, :, num_cam*4*num_classes:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
#return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
return lambda_cls_regr * K.sum(y_true[:, :, :num_cam*4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :num_cam*4*num_classes])
#return lambda_cls_regr * K.sum(y_true[:, :, :num_cam*4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :num_cam*4*num_classes]) * 0
return class_loss_regr_fixed_num | cad962f1af1a1acb2013063c6803261535652c18 | 22,266 |
import smtplib
import ssl
def smtplib_connector(hostname, port, username=None, password=None, use_ssl=False):
""" A utility class that generates an SMTP connection factory.
:param str hostname: The SMTP server's hostname
:param int port: The SMTP server's connection port
:param str username: The SMTP server username
    :param str password: The SMTP server password
:param bool use_ssl: Whether to use SSL
"""
def connect():
        if use_ssl:
            # implicit TLS for the whole session; STARTTLS is neither needed nor
            # possible on a connection that is already wrapped by SMTP_SSL
            conn = smtplib.SMTP_SSL(hostname, port,
                                    context=ssl.create_default_context())
        else:
            conn = smtplib.SMTP(hostname, port)
if username or password:
conn.login(username, password)
return conn
return connect | 511fe48b1f3f2d5d3b9ef3a803166be1519a1b7f | 22,267 |
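A usage sketch with placeholder host and credentials (nothing here points at a real server):
connect = smtplib_connector("smtp.example.com", 465,
                            username="user", password="secret", use_ssl=True)
conn = connect()   # opens the connection and logs in
conn.quit()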
def _to_one_hot_sequence(indexed_sequence_tensors):
"""Convert ints in sequence to one-hots.
Turns indices (in the sequence) into one-hot vectors.
Args:
indexed_sequence_tensors: dict containing SEQUENCE_KEY field.
For example: {
'sequence': '[1, 3, 3, 4, 12, 6]' # This is the amino acid sequence.
... }
Returns:
indexed_sequence_tensors with the same overall structure as the input,
except that SEQUENCE_KEY field has been transformed to a one-hot
encoding.
For example:
{
# The first index in sequence is from letter C, which
# is at index 1 in the amino acid vocabulary, and the second is from
# E, which is at index 4.
SEQUENCE_KEY: [[0, 1, 0, ...], [0, 0, 0, 1, 0, ...]...]
...
}
"""
indexed_sequence_tensors[SEQUENCE_KEY] = tf.one_hot(
indices=indexed_sequence_tensors[SEQUENCE_KEY],
depth=len(utils.AMINO_ACID_VOCABULARY))
return indexed_sequence_tensors | 32ff14139b53f181d6f032e4e372357cf54c1d62 | 22,268 |
import numpy as np
def kaiser_smooth(x, beta):
""" kaiser window smoothing """
window_len=41 #Needs to be odd for proper response
# extending the data at beginning and at the end
# to apply the window at the borders
s = np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]] #start:stop:step
w = np.kaiser(window_len,beta)
y = np.convolve(w/w.sum(),s,mode='valid')
return y[20:len(y)-20] | 2b766edd85927766330c8cddded3af639d5f16f3 | 22,269 |
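A small sketch (assumes numpy): smoothing a noisy sine keeps the original length because the edge extension and the final trim cancel out.
import numpy as np
x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.1 * np.random.randn(200)
y = kaiser_smooth(x, beta=14)
print(len(x), len(y))   # 200 200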
import pysam
from collections import defaultdict
def get_indel_dicts(bamfile, target):
"""Get all insertion in alignments within target. Return dict."""
samfile = pysam.AlignmentFile(bamfile, "rb")
indel_coverage = defaultdict(int)
indel_length = defaultdict(list)
indel_length_coverage = dict()
for c, s, e in parse_bed(target):
s = int(s) - 151
e = int(e) + 151
for alignment in samfile.fetch(c, int(s), int(e)):
if good_alignment(alignment) and cigar_has_insertion(alignment.cigarstring):
read_start = alignment.get_reference_positions(full_length=True)[0]
if read_start is None:
continue
locus, length = parse_cigartuple(alignment.cigar, read_start,
alignment.reference_name)
if pos_in_interval(locus.split(':')[1], s, e):
if locus in indel_length:
indel_length[locus].append(length)
else:
indel_length[locus] = [length]
indel_coverage[locus] += 1
samfile.close()
for locus, coverage in indel_coverage.items():
indel_length_coverage[locus] = tuple(set(indel_length[locus])), int(coverage)
return indel_length_coverage | e8f6883f1cf1d653fe0825b4f10518daa2801178 | 22,270 |