content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import requests
def get_auth():
"""
POST request to users/login, returns auth token
"""
try:
url_user_login = f"https://{url_core_data}/users/login"
json = {
"username": creds_name,
"password": creds_pw
}
headers = {
"Accept": "application/json"
}
r = requests.post(url_user_login, headers=headers, json=json, verify=False)
response = r.json()
code = r.status_code
token = response["token"]
# print(f"RESPONSE: {response}")
# print(f"STATUS_CODE: {code}")
# print(f"TOKEN: {token}")
return token
except Exception as e:
auth_err_msg = f"Error authenticating with the DIVA API: \n\
{e}"
logger.error(auth_err_msg) | 86aa225a8c856dd46ece14d982a24b32a52a87ef | 14,890 |
def vector_between_points(P, Q):
""" vector between initial point P and terminal point Q """
return vector_subtract(Q, P) | de86c29dfe8c75b31040942d7195b8b92d731106 | 14,891 |
import time
def before_train(loaded_train_model, train_model, train_sess, global_step, hparams, log_f):
"""Misc tasks to do before training."""
stats = init_stats()
info = {"train_ppl": 0.0, "speed": 0.0, "avg_step_time": 0.0,
"avg_grad_norm": 0.0, "avg_train_sel": 0.0,
"learning_rate": loaded_train_model.learning_rate.eval(
session=train_sess)}
start_train_time = time.time()
print_out("# Start step %d, lr %g, %s" % (global_step, info["learning_rate"], time.ctime()), log_f)
# Initialize all of the iterators
skip_count = hparams.qe_batch_size * hparams.epoch_step
print_out("# Init train iterator, skipping %d elements" % skip_count)
train_sess.run(
train_model.iterator.initializer,
feed_dict={train_model.skip_count_placeholder: skip_count})
return stats, info, start_train_time | d333808bec0771e74d709f859b7423b9e561703f | 14,892 |
def methodInDB(method_name, dict_link, interface_db_cursor): #checks the database to see if the method exists already
"""
Checks the database to see if a method already exists there.
Returns a list [Boolean True/False for whether the method exists in the db, dictionary link/ID]
"""
crsr = interface_db_cursor
#splitting method into parts
if "::" in method_name:
method = method_name.split('::')
cn = method[0].strip()
mn = '::'.join(method[1:]).strip()
else:
cn = "Unknown"
mn = method_name
if dict_link == '': #dict link should only be empty on the initial call
# search for any method with the same name and class
crsr.execute("SELECT class_name, method_name, method_text, dict_link FROM methods WHERE class_name = ? AND method_name = ?", (cn, mn))
res = crsr.fetchall()
if len(res) == 0: #method not in table
return [False, '']
else: # found something, verify it is right
if len(res) == 1:
print('Method found in database.')
if res[0][0] == 'Unknown':
print(res[0][1])
else:
print('::'.join(res[0][0:2]))
print(res[0][2])
print('Is this the correct method? (Y/N)') #prompt the user to confirm that this is the right method
k = input()
k = k.strip()
while( k not in ['N', 'n', 'Y', 'y' ] ):
print('Invalid input, try again')
k = input()
if k == 'Y' or k == 'y':
return [True, res[0][3]]
elif k == 'N' or k == 'n':
return [False, '']
elif len(res) > 1:
print("\nMethod found in database")
count = 1
for r in res:
tmp = str(count) + ': '
print(tmp)
if r[0] == 'Unknown':
print(r[1])
else:
print('::'.join(r[0:2]))
print(r[2],'\n')
count += 1
print('Which one of these is the correct method?\nPut 0 for none of them.') #if there are multiple versions of the method in the db
# prompt the user to select which method is the right method, prints the method text
k = input()
try: k = int(k)
except: k = -1
while( int(k) > len(res) or int(k) < 0 ):
print("Invalid input: try again please")
k = input()
try: k = int(k)
except: k = -1
if k == 0:
return [False, '']
elif k > 0 and k <= len(res):
return [True, res[k-1][3]]
else: #there is a dict_link, can check for exact, usually what happens
crsr.execute("SELECT class_name, method_name FROM methods WHERE class_name = ? AND method_name = ? AND dict_link = ?", (cn, mn, dict_link))
#simple sql select
res = crsr.fetchall()
if len(res) == 0: #method not in table
return [False, dict_link]
elif len(res) > 0: # we found something
return [True, dict_link] | 8dc3ecc256b696a06906e63a461c241ff429e8ae | 14,894 |
import numpy as np
def dict_to_image(screen):
""" Takes a dict of room locations and their block type output by RunGame.
Renders the current state of the game screen.
"""
picture = np.zeros((51, 51))
# Color tiles according to what they represent on screen:
for tile in screen:
pos_x, pos_y = tile
if pos_x < 51 and pos_y < 51:
if screen[tile] == 46:
picture[pos_y][pos_x] = 0
elif screen[tile] == 35:
picture[pos_y][pos_x] = 240
else:
picture[pos_y][pos_x] = 150
return picture | 5657d3984a035d11854ef2b1f6dff642a00032a1 | 14,895 |
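A small usage sketch for the renderer above, assuming the tile-code convention used in the function (46 and 35 are the ASCII codes for '.' and '#'; anything else is mapped to the 150 shade):
import numpy as np
screen = {(0, 0): 46, (1, 0): 35, (2, 0): 64}
img = dict_to_image(screen)
print(img.shape)                         # (51, 51)
print(img[0, 0], img[0, 1], img[0, 2])   # 0.0 240.0 150.0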
def can_fuse_to(wallet):
"""We can only fuse to wallets that are p2pkh with HD generation. We do
*not* need the private keys."""
return isinstance(wallet, Standard_Wallet) | 1ee8693d7457591a64057a4913d9739f96319e7a | 14,897 |
def _build_context(hps, encoder_outputs):
"""Compute feature representations for attention/copy.
Args:
hps: hyperparameters.
encoder_outputs: outputs by the encoder RNN.
Returns:
Feature representation of [batch_size, seq_len, decoder_dim]
"""
with tf.variable_scope("memory_context"):
context = tf.layers.dense(
encoder_outputs,
units=hps.decoder_dim,
activation=None,
use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="memory_projector")
return context | 6ce8a9a7845376f1804610d371d23b32fa9991f7 | 14,898 |
from typing import Union
def rf_local_divide(left_tile_col: Column_type, rhs: Union[float, int, Column_type]) -> Column:
"""Divide two Tiles cell-wise, or divide a Tile's cell values by a scalar"""
if isinstance(rhs, (float, int)):
rhs = lit(rhs)
return _apply_column_function('rf_local_divide', left_tile_col, rhs) | 76740879461e6dea302568d3bebc4dd6d7eb9363 | 14,899 |
def check_dependencies_ready(dependencies, start_date, dependencies_to_ignore):
"""Checks if every dependent pipeline has completed
Args:
dependencies(dict): dict from id to name of pipelines it depends on
start_date(str): string representing the start date of the pipeline
dependencies_to_ignore(list of str): dependencies to ignore if failed
"""
print('Checking dependency at', str(datetime.now()))
dependency_ready = True
# Convert date string to datetime object
start_date = datetime.strptime(start_date, '%Y-%m-%d')
for pipeline in dependencies.keys():
# Get instances of each pipeline
instances = list_pipeline_instances(pipeline)
failures = []
# Collect all pipeline instances that are scheduled for today
instances_today = []
for instance in instances:
date = datetime.strptime(instance[START_TIME], '%Y-%m-%dT%H:%M:%S')
if date.date() == start_date.date():
instances_today.append(instance)
# Dependency pipeline has not started from today
if not instances_today:
dependency_ready = False
for instance in instances_today:
# One of the dependency failed/cancelled
if instance[STATUS] in FAILED_STATUSES:
if dependencies[pipeline] not in dependencies_to_ignore:
raise Exception(
'Pipeline %s (ID: %s) has bad status: %s'
% (dependencies[pipeline], pipeline, instance[STATUS])
)
else:
failures.append(dependencies[pipeline])
# Dependency is still running
elif instance[STATUS] != FINISHED:
dependency_ready = False
return dependency_ready, failures | 8ff01e54e3dae4110e7bd06accbc01b73148f4c3 | 14,900 |
def factor_returns(factor_data, demeaned=True, group_adjust=False):
"""
Compute the returns of a portfolio weighted by factor values.
Weights are the demeaned factor values divided by the sum of their absolute values (so total leverage is 1).
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A DataFrame whose index is a MultiIndex of date (level 0) and asset (level 1),
and whose values include the factor value, the forward returns for each period,
the factor quantile, and optionally the factor group and factor weight.
demeaned : bool
Is the factor analysis based on a long/short portfolio?
If True, factor values are demeaned when computing the weights.
group_adjust : bool
Is the factor analysis based on a group (industry) neutral portfolio?
If True, factor values are demeaned by group and date when computing the weights.
Returns
-------
returns : pd.DataFrame
The long/short portfolio returns with zero net exposure for each period.
"""
def to_weights(group, is_long_short):
if is_long_short:
demeaned_vals = group - group.mean()
return demeaned_vals / demeaned_vals.abs().sum()
else:
return group / group.abs().sum()
grouper = [factor_data.index.get_level_values('date')]
if group_adjust:
grouper.append('group')
weights = factor_data.groupby(grouper)['factor'] \
.apply(to_weights, demeaned)
if group_adjust:
weights = weights.groupby(level='date').apply(to_weights, False)
weighted_returns = \
factor_data[get_forward_returns_columns(factor_data.columns)] \
.multiply(weights, axis=0)
returns = weighted_returns.groupby(level='date').sum()
return returns | 127f26e20ca14cae5d9fc2e444ab93d98cc6b8c4 | 14,901 |
def create_input_lambda(i):
"""Extracts off an object tensor from an input tensor"""
return Lambda(lambda x: x[:, i]) | b574e5659723f5394590cedcc4305f9fade5021e | 14,902 |
def create_model_talos(params, time_steps, num_features, input_loss='mae', input_optimizer='adam',
patience=3, monitor='val_loss', mode='min', epochs=100, validation_split=0.1):
"""Uses sequential model class from keras. Adds LSTM layer. Input samples, timesteps, features.
Hyperparameters include number of cells, dropout rate. Output is encoded feature vector of the input data.
Uses autoencoder by mirroring/reversing encoder to be a decoder."""
model = Sequential()
model.add(LSTM(params['cells'], input_shape=(time_steps, num_features))) # one LSTM layer
model.add(Dropout(params['dropout']))
model.add(RepeatVector(time_steps))
model.add(LSTM(params['cells'], return_sequences=True)) # mirror the encoder in the reverse fashion to create the decoder
model.add(Dropout(params['dropout']))
model.add(TimeDistributed(Dense(num_features)))
print(model.optimizer)
model.compile(loss=input_loss, optimizer=input_optimizer)
es = tf.keras.callbacks.EarlyStopping(monitor=monitor, patience=patience, mode=mode)
history = model.fit(
X_train, y_train,
epochs=epochs, # just set to something high, early stopping will monitor.
batch_size=params['batch_size'], # this can be optimized later
validation_split=validation_split, # use 10% of data for validation, use 90% for training.
callbacks=[es], # early stopping similar to earlier
shuffle=False # because order matters
)
return history, model | e17becc7b95b07fb15059e8ef76a70fbfbd68b88 | 14,903 |
def ortho_init(scale=1.0):
"""
Orthogonal initialization for the policy weights
:param scale: (float) Scaling factor for the weights.
:return: (function) an initialization function for the weights
"""
# _ortho_init(shape, dtype, partition_info=None)
def _ortho_init(shape, *_, **_kwargs):
"""Intialize weights as Orthogonal matrix.
Orthogonal matrix initialization [1]_. For n-dimensional shapes where
n > 2, the n-1 trailing axes are flattened. For convolutional layers, this
corresponds to the fan-in, so this makes the initialization usable for
both dense and convolutional layers.
References
----------
.. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
"Exact solutions to the nonlinear dynamics of learning in deep
linear neural networks." arXiv preprint arXiv:1312.6120 (2013).
"""
# lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
# Added by Ronja
elif len(shape) == 3: # assumes NWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
gaussian_noise = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(gaussian_noise, full_matrices=False)
weights = u if u.shape == flat_shape else v # pick the one with the correct shape
weights = weights.reshape(shape)
return (scale * weights[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init | d82af86b0650c4588b1f4a22fea809cda1e72959 | 14,905 |
def get_rde_model(rde_version):
"""Get the model class of the specified rde_version.
Factory method to return the model class based on the specified RDE version
:param rde_version (str)
:rtype model: NativeEntity
"""
rde_version: semantic_version.Version = semantic_version.Version(rde_version) # noqa: E501
if rde_version.major == 1:
return NativeEntity1X
elif rde_version.major == 2:
return NativeEntity2X | 0ce32c2649ebdac84f6a000e40df8e85715733e1 | 14,906 |
import math
def pnorm(x, mu, sd):
"""
Normal distribution PDF
Args:
* scalar: variable
* scalar: mean
* scalar: standard deviation
Return type: scalar (probability density)
"""
return math.exp(- ((x - mu) / sd) ** 2 / 2) / (sd * math.sqrt(2 * math.pi)) | 08896264db17493bc299a3e69b781c28429ef08f | 14,907 |
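A quick numeric check of the density above (assuming the exact 1/(sd*sqrt(2*pi)) normalization used in the return statement): the standard normal density is about 0.3989 at the mean and about 0.242 one standard deviation away.
print(round(pnorm(0.0, 0.0, 1.0), 4))  # 0.3989
print(round(pnorm(1.0, 0.0, 1.0), 4))  # 0.242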
import numpy as np
import math
def getTransformToPlane(planePosition, planeNormal, xDirection=None):
"""Returns transform matrix from World to Plane coordinate systems.
Plane is defined in the World coordinate system by planePosition and planeNormal.
Plane coordinate system: origin is planePosition, z axis is planeNormal, x and y axes are orthogonal to z.
"""
# Determine the plane coordinate system axes.
planeZ_World = planeNormal/np.linalg.norm(planeNormal)
# Generate a plane Y axis by generating an orthogonal vector to
# plane Z axis vector by cross product plane Z axis vector with
# an arbitrarily chosen vector (that is not parallel to the plane Z axis).
if xDirection:
unitX_World = np.array(xDirection)
unitX_World = unitX_World/np.linalg.norm(unitX_World)
else:
unitX_World = np.array([0,0,1])
angle = math.acos(np.dot(planeZ_World,unitX_World))
# Normalize between -pi/2 .. +pi/2
if angle>math.pi/2:
angle -= math.pi
elif angle<-math.pi/2:
angle += math.pi
if abs(angle)*180.0/math.pi>20.0:
# unitX is not parallel to planeZ, we can use it
planeY_World = np.cross(planeZ_World, unitX_World)
else:
# unitX is parallel to planeZ, use unitY instead
unitY_World = np.array([0,1,0])
planeY_World = np.cross(planeZ_World, unitY_World)
planeY_World = planeY_World/np.linalg.norm(planeY_World)
# X axis: orthogonal to tool's Y axis and Z axis
planeX_World = np.cross(planeY_World, planeZ_World)
planeX_World = planeX_World/np.linalg.norm(planeX_World)
transformPlaneToWorld = np.row_stack((np.column_stack((planeX_World, planeY_World, planeZ_World, planePosition)),
(0, 0, 0, 1)))
transformWorldToPlane = np.linalg.inv(transformPlaneToWorld)
return transformWorldToPlane | 19073b3fefdb75a92ccc812538078e1d5ad72d75 | 14,908 |
def jp_runtime_dir(tmp_path):
"""Provides a temporary Jupyter runtime dir directory value."""
return mkdir(tmp_path, "runtime") | 17794c4b702d97d4040d89909452df5e4dd1344e | 14,909 |
def _softmax(X, n_samples, n_classes):
"""Derive the softmax of a 2D-array."""
maximum = np.empty((n_samples, 1))
for i in prange(n_samples):
maximum[i, 0] = np.max(X[i])
exp = np.exp(X - maximum)
sum_ = np.empty((n_samples, 1))
for i in prange(n_samples):
sum_[i, 0] = np.sum(exp[i])
return exp / sum_ | 8544a79dc52601e882383164ae34dd05d893ed2a | 14,910 |
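An illustrative check of the row-wise softmax above in plain NumPy (prange presumably comes from numba and is not defined in this snippet); subtracting the row maximum before exponentiating is the same stabilisation trick, and each output row sums to 1:
import numpy as np
X = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
m = X.max(axis=1, keepdims=True)            # row-wise maximum, as in the prange loop above
e = np.exp(X - m)
probs = e / e.sum(axis=1, keepdims=True)
print(probs.round(3))     # [[0.09  0.245 0.665]] and a uniform [0.333 0.333 0.333] row
print(probs.sum(axis=1))  # [1. 1.]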
from typing import OrderedDict
from typing import MutableMapping
def merge_dicts(dict1, dict2, dict_class=OrderedDict):
"""Merge dictionary ``dict2`` into ``dict1``"""
def _merge_inner(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], (dict, MutableMapping)) and isinstance(
dict2[k], (dict, MutableMapping)
):
yield k, dict_class(_merge_inner(dict1[k], dict2[k]))
else:
# If one of the values is not a dict, you can't continue
# merging it. Value from second dict overrides one in
# first and we move on.
yield k, dict2[k]
elif k in dict1:
yield k, dict1[k]
else:
yield k, dict2[k]
return dict_class(_merge_inner(dict1, dict2)) | b2013f888dfc3a1713153c7aa8a00ce4044fba07 | 14,911 |
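A small usage sketch of the recursive merge above: nested mappings are merged key by key, and for scalar conflicts the value from the second dict wins.
d1 = {"a": 1, "nested": {"x": 1, "y": 2}}
d2 = {"b": 3, "nested": {"y": 20, "z": 30}}
merged = merge_dicts(d1, d2)
print(dict(merged["nested"]))    # {'x': 1, 'y': 20, 'z': 30} (key order may vary)
print(merged["a"], merged["b"])  # 1 3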
import numpy
def jaccard_overlap_numpy(box_a: numpy.ndarray, box_b: numpy.ndarray) -> numpy.ndarray:
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
jaccard overlap: Shape: [box_a.shape[0]]"""
inter = intersect_numpy(box_a, box_b)
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]) # [A]
area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]) # scalar
union = area_a + area_b - inter
return inter / union | 4fc79406724815a9d5982ac94344ca2e45993980 | 14,912 |
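A self-contained sketch of the overlap computation above. intersect_numpy is not defined in this snippet, so a hypothetical stand-in (returning the intersection area of each box in box_a with box_b) is supplied for illustration:
import numpy as np
def intersect_numpy(box_a, box_b):
    # hypothetical stand-in: intersection areas of [N,4] boxes with a single [4] box
    max_xy = np.minimum(box_a[:, 2:], box_b[2:])
    min_xy = np.maximum(box_a[:, :2], box_b[:2])
    wh = np.clip(max_xy - min_xy, a_min=0, a_max=None)
    return wh[:, 0] * wh[:, 1]
boxes = np.array([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
query = np.array([0.0, 0.0, 10.0, 10.0])
print(jaccard_overlap_numpy(boxes, query))  # [1.0, ~0.143]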
import glob
def find_pkg(pkg):
""" Find the package file in the repository """
candidates = glob.glob('/repo/' + pkg + '*.rpm')
if len(candidates) == 0:
print("No candidates for: '{0}'".format(pkg))
assert len(candidates) == 1
return candidates[0] | ac91f34ed7accd2c81e1c68e143319998de9cdf3 | 14,913 |
import random
def random_choice(lhs, ctx):
"""Element ℅
(lst) -> random element of a
(num) -> Random integer from 0 to a
"""
if vy_type(lhs) == NUMBER_TYPE:
return random.randint(0, lhs)
return random.choice(iterable(lhs, ctx=ctx)) | 9b4251a9b1d590742cab847035a2ef78c565af70 | 14,914 |
def ask(choices,
message="Choose one from [{choices}]{default}{cancelmessage}: ",
errormessage="Invalid input", default=None,
cancel=False, cancelkey='c',
cancelmessage='press {cancelkey} to cancel'):
"""
ask is a shortcut to instantiate PickOne and call its .ask method
"""
return PickOne(choices, message, errormessage, default, cancel, cancelkey,
cancelmessage).ask() | 09f1951a72800bb710167bbf0b2695b94a6370ec | 14,915 |
def _check_eq(value):
"""Returns a function that checks whether the value equals a
particular integer.
"""
return lambda x: int(x) == int(value) | 4d2a02727afd90dbc012d252b01ed72f745dc564 | 14,918 |
def query_data(session, agency_code, start, end, page_start, page_stop):
""" Request D2 file data
Args:
session - DB session
agency_code - FREC or CGAC code for generation
start - Beginning of period for D file
end - End of period for D file
page_start - Beginning of pagination
page_stop - End of pagination
"""
rows = initial_query(session).\
filter(file_model.is_active.is_(True)).\
filter(file_model.awarding_agency_code == agency_code).\
filter(func.cast_as_date(file_model.action_date) >= start).\
filter(func.cast_as_date(file_model.action_date) <= end).\
slice(page_start, page_stop)
return rows | f555685ae4072aec14db29ee3b3425f1b0de5adb | 14,919 |
def ProfitBefTax(t):
"""Profit before Tax"""
return (PremIncome(t)
+ InvstIncome(t)
- BenefitTotal(t)
- ExpsTotal(t)
- ChangeRsrv(t)) | 4787d1c34698beb1e493968e5302defdf1416516 | 14,920 |
def myCommand():
"""
listens to commands spoken through microphone (audio)
:returns text extracted from the speech which is our command
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print('Say something...')
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1) # removed "duration=1" argument to reduce wait time
audio = r.listen(source)
try:
command = r.recognize_google(audio).lower()
print('You said: ' + command + '\n')
#loop back to continue to listen for commands if unrecognizable speech is received
except sr.UnknownValueError:
print('....')
command = myCommand()
except sr.RequestError as e:
print("????")
return command | ce0b3c01efa1fe0aa704183e6293d4ed7e5170e9 | 14,921 |
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df | 7445d20e27ad2e34702868eaad028c86e71ac3a7 | 14,922 |
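A tiny illustrative frame for the hammer test above (column names open/high/low/close as used in the function); the first candle has a long lower shadow and a small body near the high, so it is flagged, while the second is not:
import pandas as pd
ohlc = pd.DataFrame({
    "open": [9.5, 10.0],
    "high": [10.2, 10.5],
    "low": [8.0, 9.8],
    "close": [10.0, 9.9],
})
print(hammer(ohlc)["hammer"].tolist())  # [True, False]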
def calcPhase(star,time):
"""
Calculate the phase of an orbit, very simple calculation but used quite a lot
"""
period = star.period
phase = time/period
return phase | 4b282d9e4fdb76a4358d895ba30b902328ce030c | 14,923 |
def advanced_search():
"""
Get a json dictionary of search filter values suitable for use with the javascript queryBuilder plugin
"""
filters = [
dict(
id='name',
label='Name',
type='string',
operators=['equal', 'not_equal', 'begins_with', 'ends_with', 'contains']
),
dict(
id='old_name',
label='Old Name',
type='string',
operators=['equal', 'not_equal', 'begins_with', 'ends_with', 'contains']
),
dict(
id='label',
label='Label',
type='string',
operators=['contains']
),
dict(
id='qtext',
label='Question Text',
type='string',
operators=['contains']
),
dict(
id='probe',
label='Probe',
type='string',
operators=['contains']
),
dict(
id='data_source',
label='Data Source',
type='string',
input='select',
values=valid_filters['data_source'],
operators=['equal', 'not_equal', 'in', 'not_in'],
multiple=True,
plugin='selectpicker'
),
dict(
id='survey',
label='Survey',
type='string',
input='select',
values=valid_filters['survey'],
operators=['equal', 'not_equal', 'in', 'not_in'],
multiple=True,
plugin='selectpicker'
),
dict(
id='wave',
label='Wave',
type='string',
input='select',
values=valid_filters['wave'],
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
multiple=True,
plugin='selectpicker'
),
dict(
id='respondent',
label='Respondent',
type='string',
input='select',
values=valid_filters['respondent'],
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
multiple=True,
plugin='selectpicker'
),
dict(
id='focal_person',
label='Focal Person',
type='string',
input='select',
values={'Focal Child': 'Focal Child', 'Mother': 'Mother', 'Father': 'Father', 'Primary Caregiver': 'Primary Caregiver', 'Partner': 'Partner', 'Other': 'Other'},
operators=['contains', 'is_null', 'is_not_null']
),
dict(
id='topics',
label='Topics',
type='string',
input='select',
values=valid_filters['topic'],
operators=['contains'],
multiple=True,
plugin='selectpicker'
),
dict(
id='subtopics',
label='Sub-Topics',
type='string',
input='select',
values=valid_filters['subtopic'],
operators=['contains'],
multiple=True,
plugin='selectpicker'
),
dict(
id='scale',
label='Scale',
type='string',
input='select',
values=valid_filters['scale'],
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
multiple=True,
plugin='selectpicker'
),
dict(
id='n_cities_asked',
label='Asked in (N) cities',
type='integer',
operators=['equal', 'not_equal', 'less', 'less_or_equal', 'greater', 'greater_or_equal', 'in', 'not_in'],
input='select',
values=valid_filters['n_cities_asked'],
multiple=True,
plugin='selectpicker'
),
dict(
id='data_type',
label='Data Type',
type='string',
input='select',
values=valid_filters['data_type'],
operators=['equal', 'not_equal', 'in', 'not_in'],
multiple=True,
plugin='selectpicker'
),
dict(
id='in_FFC_file',
label='FFC variable',
type='string',
input='select',
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
values={'yes': 'Yes', 'no': 'No'},
multiple=True,
plugin='selectpicker'
)
]
return jsonify({"filters": filters}) | 73453941eff4aa03def530691b52b109b1fe0a76 | 14,924 |
def rdp_rec(M, epsilon, dist=pldist):
"""
Simplifies a given array of points.
Recursive version.
:param M: an array
:type M: numpy array
:param epsilon: epsilon in the rdp algorithm
:type epsilon: float
:param dist: distance function
:type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist`
"""
dmax = 0.0
index = -1
for i in range(1, M.shape[0]):
d = dist(M[i], M[0], M[-1])
if d > dmax:
index = i
dmax = d
if dmax > epsilon:
r1 = rdp_rec(M[:index + 1], epsilon, dist)
r2 = rdp_rec(M[index:], epsilon, dist)
return np.vstack((r1[:-1], r2))
else:
return np.vstack((M[0], M[-1])) | 2518ef902bb9d7e696145e86746804a6fca115f8 | 14,925 |
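A usage sketch for the recursive simplification above. pldist (the default distance, from the rdp package) is not defined in this snippet, so a hypothetical point-to-line distance is passed explicitly:
import numpy as np
def point_line_dist(point, start, end):
    # perpendicular distance from `point` to the line through `start` and `end`
    if np.all(start == end):
        return float(np.linalg.norm(point - start))
    d = end - start
    return float(abs(d[0] * (start[1] - point[1]) - d[1] * (start[0] - point[0])) / np.linalg.norm(d))
M = np.array([[0.0, 0.0], [1.0, 0.1], [2.0, -0.1], [3.0, 5.0], [4.0, 6.0]])
print(rdp_rec(M, epsilon=1.0, dist=point_line_dist))
# keeps [0, 0], [2, -0.1] and [4, 6]; the nearly collinear points are dropped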
from datetime import datetime
def secBetweenDates(dateTime0, dateTime1):
"""
:param dateTime0:
:param dateTime1:
:return: The number of seconds between two dates.
"""
dt0 = datetime.strptime(dateTime0, '%Y/%m/%d %H:%M:%S')
dt1 = datetime.strptime(dateTime1, '%Y/%m/%d %H:%M:%S')
timeDiff = ((dt1.timestamp()) - (dt0.timestamp()))
return timeDiff | d9e2f839d8a7c10fbde8009ea1f69db56a222426 | 14,926 |
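A quick usage example of the date-difference helper above (note the expected '%Y/%m/%d %H:%M:%S' format; the result is negative when the first date is later):
print(secBetweenDates('2020/01/01 00:00:00', '2020/01/01 00:01:30'))  # 90.0
print(secBetweenDates('2020/01/02 00:00:00', '2020/01/01 00:00:00'))  # -86400.0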
def iframe_home(request):
""" Página inicial no iframe """
# Info sobre pedidos de fabricação
pedidosFabricacao = models.Pedidofabricacao.objects.filter(
hide=False
).exclude(
fkid_statusfabricacao__order=3
).order_by(
'-fkid_statusfabricacao', 'dt_fim_maturacao'
)
context = {
"fabricacaoPiece":"iframe/pieces/fabricacaoDetail.html",
"pedidosFabricacao":pedidosFabricacao
}
return render(request, "iframe/home.html", context) | b8db02d3b8a019bdc23fd369ab4ccc96e9b77437 | 14,927 |
def inv(n: int, n_bits: int) -> int:
"""Compute the bitwise inverse.
Args:
n: An integer.
n_bits: The bit-width of the integers used.
Returns:
The binary inverse of the input.
"""
# We should only invert the bits that are within the bit-width of the
# integers we use. We set this mask to set the other bits to zero.
bit_mask = (1 << n_bits) - 1 # e.g. 0b111 for n_bits = 3
return ~n & bit_mask | 5be1eaf13490091096b8cd13fdbcdbbbe43760da | 14,929 |
def _render_flight_addition_page(error):
"""
Helper to render the flight addition page
:param error: Error message to display on the page or None
:return: The rendered flight addition template
"""
return render_template("flights/add.html",
airlines=list_airlines(),
airports=list_airports(),
error=error) | 916b14fa3b829b4fa6e0720d64bcdd74aab476ce | 14,930 |
def get_node_index(glTF, name):
"""
Return the node index in the glTF array.
"""
if glTF.get('nodes') is None:
return -1
index = 0
for node in glTF['nodes']:
if node['name'] == name:
return index
index += 1
return -1 | cb0c6a727e9786467861d0ea622462264269814a | 14,931 |
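A minimal usage sketch for the node lookup above, with a hand-built glTF dict:
gltf = {'nodes': [{'name': 'root'}, {'name': 'camera'}]}
print(get_node_index(gltf, 'camera'))   # 1
print(get_node_index(gltf, 'missing'))  # -1
print(get_node_index({}, 'root'))       # -1 (no 'nodes' entry)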
def online_user_count(filter_user=None):
"""
Returns the number of users online
"""
return len(_online_users()) | 5ab03f1ca6738925847b338e956a0c8afbbc4d7d | 14,932 |
from itertools import count
from requests import Session
def get_latest_version_url(start=29, template="http://unicode.org/Public/cldr/{}/core.zip"):
"""Discover the most recent version of the CLDR dataset.
Effort has been made to make this function reusable for other URL numeric URL schemes, just override `start` and
`template` to iteratively search for the latest version of any other URL.
"""
latest = None
with Session() as http: # We perform several requests iteratively, so let's be nice and re-use the connection.
for current in count(start):
result = http.head(template.format(current)) # We only care if it exists or not, thus HEAD use here.
if result.status_code != 200:
return current - 1, latest # Propagate the version found and the URL for that version.
latest = result.url | a93ff3081e6e0a5a507d79a5340b69b2be670f88 | 14,933 |
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
msg = f"the 'package' argument is required to perform a relative import for {name!r}"
raise TypeError(msg)
for character in name:
if character != '.':
break
level += 1
return _gcd_import(name[level:], package, level) | aeed1a00ec9149923c9be73a2346de4664ff98e3 | 14,935 |
import requests
from bs4 import BeautifulSoup
def get_image_links_from_imgur(imgur_url):
"""
Given an imgur URL, return a list of image URLs from it.
"""
if 'imgur.com' not in imgur_url:
raise ValueError('given URL does not appear to be an imgur URL')
urls = []
response = requests.get(imgur_url)
if response.status_code != 200:
raise ValueError('there was something wrong with the given URL')
soup = BeautifulSoup(response.text, 'html5lib')
# this is an album
if '/a/' in imgur_url:
matches = soup.select('.album-view-image-link a')
urls += [x['href'] for x in matches]
# directly linked image
elif 'i.imgur.com' in imgur_url:
urls.append(imgur_url)
# single-image page
else:
try:
urls.append(soup.select('.image a')[0]['href'])
except IndexError:
pass
# clean up image URLs
urls = [url.strip('/') for url in urls]
urls = ['http://{}'.format(url) if not url.startswith('http') else url
for url in urls]
return urls | 19d8f994cd1730c23fdf5d6105e8db916da67d15 | 14,937 |
def filter_ignored_images(y_true, y_pred, classification=False):
""" Filter those images which are not meaningful.
Args:
y_true: Target tensor from the dataset generator.
y_pred: Predicted tensor from the network.
classification: To filter for classification or
regression.
Returns: Filtered tensors.
"""
states = y_true[:, :, -1]
if classification:
indexes = tf.where(tf.math.not_equal(states, -1))
else:
indexes = tf.where(tf.math.equal(states, 1))
pred = y_pred
true = y_true[:, :, :-1]
true_filtered = tf.gather_nd(true, indexes)
pred_filtered = tf.gather_nd(pred, indexes)
return true_filtered, pred_filtered, indexes, states | a3f5e10c2f2961eafe734bc27c53e23294ee9eea | 14,938 |
def context_data_from_metadata(metadata):
""" Utility function transforming `metadata` into a context data dictionary.
Metadata may have been encoded at the client by `metadata_from_context_data`, or
it may be "normal" GRPC metadata. In this case, duplicate values are allowed;
they become a list in the context data.
"""
data = {}
for name, value in metadata:
if name.startswith(METADATA_PREFIX):
_, key = name.split(METADATA_PREFIX, 1)
data[key] = decode_value(value)
else:
if name in data:
try:
data[name].append(value)
except AttributeError:
data[name] = [data[name], value]
else:
data[name] = value
return data | 7b801b5835f7146a2ab163b83741113a039cb6bd | 14,939 |
def plot_results_fit(
xs,
ys,
covs,
line_ax,
lh_ax=None,
outliers=None,
auto_outliers=False,
fit_includes_outliers=False,
report_rho=False,
):
"""Do the fit and plot the result.
Parameters
----------
line_ax : axes to plot the best fit line
lh_ax : axes to plot the likelihood function
xs, ys, covs: the data to use (see return value of plot_results_scatter)
outliers : list of int
list of indices for which data will be ignored in the fitting.
If auto_outliers is True, then this data will only be ignored
for the first iteration. The manual outlier choice positions the
fit where were we want it. Then, these points are added back in,
and ideally, the automatic outlier rejection will reject them in
an objective way. This is to make sure that we are not guilty of
cherry picking.
auto_outliers : bool
Use auto outlier detection in linear_ortho_maxlh, and mark
outliers on plot (line ax). See outlier detection function for
criterion.
fit_includes_outliers : bool
Use the detected outliers in the fitting, despite them being outliers.
report_rho: draw a box with the correlation coefficient AFTER outlier removal
Returns
-------
outlier_idxs : array of int
Indices of points treated as outliers
"""
# fix ranges before plotting the fit
line_ax.set_xlim(line_ax.get_xlim())
line_ax.set_ylim(line_ax.get_ylim())
r = linear_ortho_fit.linear_ortho_maxlh(
xs,
ys,
covs,
line_ax,
sigma_hess=True,
manual_outliers=outliers,
auto_outliers=auto_outliers,
fit_includes_outliers=fit_includes_outliers,
)
m = r["m"]
b_perp = r["b_perp"]
sm = r["m_unc"]
sb_perp = r["b_perp_unc"]
outlier_idxs = r["outlier_idxs"]
b = linear_ortho_fit.b_perp_to_b(m, b_perp)
# The fitting process also indicated some outliers. Do the rest without them.
if fit_includes_outliers:
xs_used = xs
ys_used = ys
covs_used = covs
else:
xs_used = np.delete(xs, outlier_idxs, axis=0)
ys_used = np.delete(ys, outlier_idxs, axis=0)
covs_used = np.delete(covs, outlier_idxs, axis=0)
# Looking at bootstrap with and without outliers might be interesting.
# boot_cov_mb = linear_ortho_fit.bootstrap_fit_errors(xs_no_out, ys_no_out, covs_no_out)
# boot_sm, boot_sb = np.sqrt(np.diag(boot_cov_mb))
# sample the likelihood function to determine statistical properties
# of m and b
a = 2
m_grid, b_perp_grid, logL_grid = linear_ortho_fit.calc_logL_grid(
m - a * sm,
m + a * sm,
b_perp - a * sb_perp,
b_perp + a * sb_perp,
xs_used,
ys_used,
covs_used,
)
# Sample the likelihood of (m, b_perp) and convert to (m, b), so we
# can properly determine the covariance.
sampled_m, sampled_b_perp = linear_ortho_fit.sample_likelihood(
m, b_perp, m_grid, b_perp_grid, logL_grid, N=2000
)
sampled_b = linear_ortho_fit.b_perp_to_b(sampled_m, sampled_b_perp)
sample_cov_mb = np.cov(sampled_m, sampled_b)
m_unc = np.sqrt(sample_cov_mb[0, 0])
b_unc = np.sqrt(sample_cov_mb[1, 1])
mb_corr = sample_cov_mb[0, 1] / (m_unc * b_unc)
# print out results here
print("*** FIT RESULT ***")
print(f"m = {m:.2e} pm {m_unc:.2e}")
print(f"b = {b:.2e} pm {b_unc:.2e}")
print(f"correlation = {mb_corr:.2f}")
if lh_ax is not None:
linear_ortho_fit.plot_solution_neighborhood(
lh_ax,
logL_grid,
[min(b_perp_grid), max(b_perp_grid), min(m_grid), max(m_grid)],
m,
b_perp,
cov_mb=sample_cov_mb,
what="L",
extra_points=zip(sampled_b_perp, sampled_m),
)
# pearson coefficient without outliers (gives us an idea of how
# reasonable the trend is)
print("VVV-auto outlier removal-VVV")
if report_rho:
plot_rho_box(
line_ax,
xs_used,
ys_used,
covs_used,
)
# plot the fitted line
xlim = line_ax.get_xlim()
xp = np.linspace(xlim[0], xlim[1], 3)
yp = m * xp + b
line_ax.plot(xp, yp, color=FIT_COLOR, linewidth=2)
# plot sampled lines
linear_ortho_fit.plot_solution_linescatter(
line_ax, sampled_m, sampled_b_perp, color=FIT_COLOR, alpha=5 / len(sampled_m)
)
# if outliers, mark them
if len(outlier_idxs) > 0:
line_ax.scatter(
xs[outlier_idxs],
ys[outlier_idxs],
marker="x",
color="y",
label="outlier",
zorder=10,
)
# return as dict, in case we want to do more specific things in
# post. Example: gathering numbers and putting them into a table, in
# the main plotting script (paper_scatter.py).
# Also return covariance and samples, useful for determining error on y = mx + b.
results = {
"m": m,
"m_unc": m_unc,
"b": b,
"b_unc": b_unc,
"mb_cov": sample_cov_mb[0, 1],
"outlier_idxs": outlier_idxs,
"m_samples": sampled_m,
"b_samples": sampled_b,
}
return results | 695d7f47fa8319f9fd085b51e4bb031acee0079a | 14,941 |
def check_for_features(cmph5_file, feature_list):
"""Check that all required features present in the cmph5_file. Return
a list of features that are missing.
"""
aln_group_path = cmph5_file['AlnGroup/Path'][0]
missing_features = []
for feature in feature_list:
if feature not in cmph5_file[aln_group_path].keys():
missing_features.append(feature)
return missing_features | 2d51e1389e6519607001ad2b0006581e6a876ddd | 14,942 |
def inverse(a: int, b: int) -> int:
"""
Calculates the modular inverse of a modulo b
:param a:
:param b:
:return:
"""
_, inv, _ = gcd_extended(a, b)
return inv % b | 5bde17e2526d5d8f940c8c384c2962dee8cb7188 | 14,943 |
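A worked example for the modular inverse above. gcd_extended is not defined in this snippet, so a minimal extended-Euclid stand-in (assumed to return (g, x, y) with a*x + b*y == g) is included for illustration:
def gcd_extended(a: int, b: int):
    # hypothetical stand-in: extended Euclidean algorithm, returns (g, x, y) with a*x + b*y == g
    if a == 0:
        return b, 0, 1
    g, x1, y1 = gcd_extended(b % a, a)
    return g, y1 - (b // a) * x1, x1
print(inverse(3, 7))    # 5, since 3 * 5 == 15 == 1 (mod 7)
print(inverse(10, 17))  # 12, since 10 * 12 == 120 == 1 (mod 17)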
def build_md2po_events(mkdocs_build_config):
"""Build dinamically those mdpo events executed at certain moments of the
Markdown file parsing extrating messages from pages, different depending on
active extensions and plugins.
"""
_md_extensions = mkdocs_build_config['markdown_extensions']
md_extensions = []
for ext in _md_extensions:
if not isinstance(ext, str):
if isinstance(ext, MkdocstringsExtension):
md_extensions.append('mkdocstrings')
else:
md_extensions.append(ext)
else:
md_extensions.append(ext)
def build_event(event_type):
parameters = {
'text': 'md2po_instance, block, text',
'msgid': 'md2po_instance, msgid, *args',
'link_reference': 'md2po_instance, target, *args',
}[event_type]
if event_type == 'text':
req_extension_conditions = {
'admonition': 're.match(AdmonitionProcessor.RE, text)',
'pymdownx.details': 're.match(DetailsProcessor.START, text)',
'pymdownx.snippets': (
're.match(SnippetPreprocessor.RE_ALL_SNIPPETS, text)'
),
'pymdownx.tabbed': 're.match(TabbedProcessor.START, text)',
'mkdocstrings': 're.match(MkDocsStringsProcessor.regex, text)',
}
body = ''
for req_extension, condition in req_extension_conditions.items():
if req_extension in md_extensions:
body += (
f' if {condition}:\n '
'md2po_instance.disabled_entries.append(text)\n'
' return False\n'
)
if not body:
return None
elif event_type == 'msgid':
body = (
" if msgid.startswith(': '):"
'md2po_instance._disable_next_line = True\n'
)
else: # link_reference
body = " if target.startswith('^'):return False;\n"
function_definition = f'def {event_type}_event({parameters}):\n{body}'
code = compile(function_definition, 'test', 'exec')
exec(code)
return locals()[f'{event_type}_event']
# load only those events required for the extensions
events_functions = {
event:
build_event(event) for event in ['text', 'msgid', 'link_reference']
}
events = {}
for event_name, event_function in events_functions.items():
if event_function is not None:
events[event_name] = event_function
return events | 5dd4cf7afe9168d4b110c197454b237f9267ce0e | 14,944 |
def is_three(x):
"""Return whether x is three.
>>> search(is_three)
3
"""
return x == 3 | a57266892eebf684945d0d841ede67965c751f1a | 14,945 |
def get_task_id(prefix, path):
"""Generate unique tasks id based on the path.
:parma prefix: prefix string
:type prefix: str
:param path: file path.
:type path: str
"""
task_id = "{}_{}".format(prefix, path.rsplit("/", 1)[-1].replace(".", "_"))
return get_unique_task_id(task_id) | 965b5df1d1cc80d489d4a003f453e53a96d4c38e | 14,946 |
def rot_permutated_geoms(geo, saddle=False, frm_bnd_key=[], brk_bnd_key=[], form_coords=[]):
""" convert an input geometry to a list of geometries
corresponding to the rotational permuations of all the terminal groups
"""
gra = graph(geo, remove_stereo=True)
term_atms = {}
all_hyds = []
neighbor_dct = automol.graph.atom_neighbor_keys(gra)
# determine if atom is a part of a double bond
unsat_atms = automol.graph.unsaturated_atom_keys(gra)
if not saddle:
rad_atms = automol.graph.sing_res_dom_radical_atom_keys(gra)
res_rad_atms = automol.graph.resonance_dominant_radical_atom_keys(gra)
rad_atms = [atm for atm in rad_atms if atm not in res_rad_atms]
else:
rad_atms = []
gra = gra[0]
for atm in gra:
if gra[atm][0] == 'H':
all_hyds.append(atm)
for atm in gra:
if atm in unsat_atms and atm not in rad_atms:
pass
else:
if atm not in frm_bnd_key and atm not in brk_bnd_key:
#if atm not in form_coords:
nonh_neighs = []
h_neighs = []
neighs = neighbor_dct[atm]
for nei in neighs:
if nei in all_hyds:
h_neighs.append(nei)
else:
nonh_neighs.append(nei)
if len(nonh_neighs) < 2 and len(h_neighs) > 1:
term_atms[atm] = h_neighs
geo_final_lst = [geo]
for atm in term_atms:
hyds = term_atms[atm]
geo_lst = []
for geom in geo_final_lst:
geo_lst.extend(_swap_for_one(geom, hyds))
geo_final_lst = geo_lst
return geo_final_lst | 347e358b311725801587a2174f62084b066b414e | 14,947 |
def wasserstein_loss(y_true, y_pred):
""" for more detail: https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py"""
return K.mean(y_true * y_pred) | bc99572e298a565e68fe41d38e1becb72b4c304d | 14,948 |
from typing import Optional
def call_and_transact(
contract_function: ContractFunction, transaction_params: Optional[TxParams] = None,
) -> HexBytes:
""" Executes contract_function.{call, transaction}(transaction_params) and returns txhash """
# First 'call' might raise an exception
contract_function.call(transaction_params)
return contract_function.transact(transaction_params) | 851904f85d757faa548f8988ffcdfe97188de288 | 14,949 |
import re
def compress_sparql(text: str, prefix: str, uri: str) -> str:
"""
Compress given SPARQL query by replacing all instances of the given uri with the given prefix.
:param text: SPARQL query to be compressed.
:param prefix: prefix to use as replace.
:param uri: uri instance to be replaced.
:return: compressed SPARQL query.
"""
bordersremv = lambda matchobj: prefix + ":" + re.sub(f"[<>]|({uri})", "", matchobj.group(0))
return re.sub(f"<?({uri}).*>?", bordersremv, text) | b86ceebadb262730fb4dec90b43e04a09d9c9541 | 14,951 |
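A usage sketch of the prefix compression above; note that the greedy '.*' in the pattern matches from the first URI occurrence to the end of the string, so queries with a single URI compress most cleanly:
query = "SELECT ?s WHERE { ?s a <http://xmlns.com/foaf/0.1/Person> }"
print(compress_sparql(query, "foaf", "http://xmlns.com/foaf/0.1/"))
# SELECT ?s WHERE { ?s a foaf:Person }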
from operator import mod
def easter(g_year):
"""Return fixed date of Easter in Gregorian year g_year."""
century = quotient(g_year, 100) + 1
shifted_epact = mod(14 +
11 * mod(g_year, 19) -
quotient(3 * century, 4) +
quotient(5 + (8 * century), 25), 30)
adjusted_epact = ((shifted_epact + 1)
if ((shifted_epact == 0) or ((shifted_epact == 1) and
(10 < mod(g_year, 19))))
else shifted_epact)
paschal_moon = (fixed_from_gregorian(gregorian_date(g_year, APRIL, 19)) -
adjusted_epact)
return kday_after(SUNDAY, paschal_moon) | e084e9a0ae755065bf7704da6aa1506894ad958e | 14,952 |
def _with_generator_error_translation(code_to_exception_class_func, func):
"""Same wrapping as above, but for a generator"""
@funcy.wraps(func)
def decorated(*args, **kwargs):
"""Execute a function, if an exception is raised, change its type if necessary"""
try:
for x in func(*args, **kwargs):
yield x
except grpc.RpcError as exc:
raise_exception_from_grpc_exception(code_to_exception_class_func, exc)
return decorated | fbd0491b2f7d68ecfaa5405a02203c3c8294bdc2 | 14,953 |
import requests
def openei_api_request(
data,
):
"""Query the OpenEI.org API.
Args:
data (dict or OrderedDict): key-value pairs of parameters to post to the
API.
Returns:
dict: the json response
"""
# define the OpenEI API URL, then construct a GET-style URL as a string to
# hash to look up/save to cache
url = "https://openei.org/services/api/content_assist/recommend"
prepared_url = requests.Request("GET", url, params=data).prepare().url
cached_response_json = get_from_cache(prepared_url)
if cached_response_json is not None:
# found this request in the cache, just return it instead of making a
# new HTTP call
return cached_response_json | c02ef34fd3fc8327a0debc954eb1a211dc161978 | 14,954 |
def generate_content(vocab, length):
"""Generate a random passage.
Pass in a dictionary of words from a text document and a specified
length (number of words) to return a randomized string.
"""
new_content = []
pair = find_trigram(vocab)
while len(new_content) < length:
third = find_trigram(vocab, pair)
trigram = (pair + " " + third).split()
new_content.extend(*[trigram]) # unpack trigrams and add to content
next_one = find_trigram(vocab, trigram[1] + " " + trigram[2])
if len(next_one.split()) > 1:
pair = next_one
else:
next_two = find_trigram(vocab, trigram[2] + " " + next_one)
pair = next_one + " " + next_two
return " ".join(new_content) | 5897c507281ffccddfb28880a0c5678b0fab7363 | 14,955 |
def transform_generic(inp: dict, out, met: ConfigurationMeta) -> list:
""" handle_generic is derived from P -> S, where P and S are logic expressions.
This function will use a generic method to transform the logic expression P -> S
into multiple mathematical constraints. This is done by first converting r into
a logic expression Ç, then Ç is converted into CNF and last into constraints.
"""
support_variable_name = met.support_variable_name
P = None
if inp['condition'] and inp['condition']['sub_conditions']:
P = ""
evaluated_sub_conditions = []
for sub_condition in inp['condition']['sub_conditions']:
if sub_condition['relation'] == "ALL":
concat = " & ".join(sub_condition['components'])
elif sub_condition.relation == "ANY":
concat = " | ".join(sub_condition['components'])
else:
raise Exception(f"Not implemented for relation type: '{sub_condition.relation}'")
if not concat == '':
evaluated_sub_conditions.append(f"({concat})")
if inp['condition']['relation'] == "ALL":
P = " & ".join(evaluated_sub_conditions)
elif inp['condition']['relation'] == "ANY":
P = " | ".join(evaluated_sub_conditions)
else:
raise Exception(f"Not implemented for relation type: '{inp['condition']['relation']}'")
cmps = inp['consequence']['components']
if inp['consequence']['rule_type'] in ["REQUIRES_ALL", "PREFERRED"]:
S = " & ".join(cmps)
elif inp['consequence']['rule_type'] == "REQUIRES_ANY":
S = " | ".join(cmps)
elif inp['consequence']['rule_type'] == "FORBIDS_ALL":
_cmps = [f"~{x}" for x in cmps]
S = " & ".join(_cmps)
elif inp['consequence']['rule_type'] == "REQUIRES_EXCLUSIVELY":
if P == None:
return transform_exactly_one(inp=inp, out=out, met=met)
condition = []
for i in range(len(cmps)):
clause = [f"{cmps[j]}" if i == j else f"~{cmps[j]}" for j in range(len(cmps))]
condition.append(" & ".join(clause))
S = " | ".join([f"({x})" for x in condition])
else:
raise Exception(f"Not implemented for rule type '{inp['consequence']['rule_type']}'")
expression = S if not P else f"({P}) >> ({S})"
constraints = fake_expression_to_constraints(
expression=expression,
support_variable_name=support_variable_name,
)
_constraints = []
for constraint, support_vector_value in constraints:
constraint[support_variable_name] = support_vector_value
_constraints.append(constraint)
return _constraints | 0b87c001a94fb9ad3198651d0948bddf7d477b1b | 14,956 |
def generate_mprocess_from_name(
c_sys: CompositeSystem, mprocess_name: str, is_physicality_required: bool = True
) -> MProcess:
"""returns MProcess object specified by name.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of MProcess.
mprocess_name : str
name of the MProcess.
is_physicality_required: bool = True
whether physicality is required for the generated object, by default True
Returns
-------
MProcess
MProcess object.
"""
# check mprocess name
single_mprocess_names = mprocess_name.split("_")
mprocess_name_list = get_mprocess_names_type1() + get_mprocess_names_type2()
for single_mprocess_name in single_mprocess_names:
if single_mprocess_name not in mprocess_name_list:
raise ValueError(
f"mprocess_name is out of range. mprocess_name={single_mprocess_name}"
)
# generate mprocess
hss = generate_mprocess_hss_from_name(mprocess_name, c_sys)
mprocess = MProcess(
hss=hss, c_sys=c_sys, is_physicality_required=is_physicality_required
)
return mprocess | 8be8f79610e424342fae3c6ddbccf2177d0941b1 | 14,957 |
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
def convert_polydata_to_image_data(poly, ref_im, reverse=True):
"""
Convert the vtk polydata to imagedata
Args:
poly: vtkPolyData
ref_im: reference vtkImage to match the polydata with
Returns:
output: resulted vtkImageData
"""
# Have to copy to create a zeroed vtk image data
ref_im_zeros = vtk.vtkImageData()
ref_im_zeros.DeepCopy(ref_im)
ref_im_zeros.GetPointData().SetScalars(numpy_to_vtk(np.zeros(vtk_to_numpy(ref_im_zeros.GetPointData().GetScalars()).shape)))
ply2im = vtk.vtkPolyDataToImageStencil()
ply2im.SetTolerance(0.05)
ply2im.SetInputData(poly)
ply2im.SetOutputSpacing(ref_im.GetSpacing())
ply2im.SetInformationInput(ref_im_zeros)
ply2im.Update()
stencil = vtk.vtkImageStencil()
stencil.SetInputData(ref_im_zeros)
if reverse:
stencil.ReverseStencilOn()
stencil.SetStencilData(ply2im.GetOutput())
stencil.Update()
output = stencil.GetOutput()
return output | 75a8780d287b5c2f5b2cc81d735859d56a5f9641 | 14,958 |
def matplot(x, y, f, vmin=None, vmax=None, ticks=None, output='output.pdf', xlabel='X', \
ylabel='Y', diverge=False, cmap='viridis', **kwargs):
"""
Parameters
----------
f : 2D array
array to be plotted.
extent: list [xmin, xmax, ymin, ymax]
Returns
-------
Save a fig in the current directory.
To be deprecated. Please use imshow.
"""
fig, ax = plt.subplots(figsize=(4,3))
set_style()
if diverge:
cmap = "RdBu_r"
else:
cmap = 'viridis'
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
extent = [xmin, xmax, ymin, ymax]
cntr = ax.imshow(f.T, aspect='auto', cmap=cmap, extent=extent, \
origin='lower', vmin=vmin, vmax=vmax, **kwargs)
ax.set_aspect('auto')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.colorbar(cntr, ticks=ticks)
ax.xaxis.set_ticks_position('bottom')
# fig.subplots_adjust(wspace=0, hspace=0, bottom=0.14, left=0.14, top=0.96, right=0.94)
if output is not None:
fig.savefig(output, dpi=1200)
return fig, ax | f50d7f7d8ebcb87a993001042f48afcc69616393 | 14,959 |
def createNewClasses(df, sc, colLabel):
"""
Divide the data into classes
Parameters
----------
df: Dataframe
Spark Dataframe
sc: SparkContext object
SparkContext object
colLabel: List
Items that considered Label
logs_dir: string
Directory for log file
Return
----------
colCat: List
Items that is considered categories
colNum: List
Items that is considered numerical values
"""
rdd = sc.parallelize(df.dtypes)
colCat = rdd.map(lambda i: i[0] if (i[1]=='string' or i[1]=='boolean' and i[0] not in colLabel) else None).filter(lambda i: i != None).collect()
colNum = rdd.map(lambda i: i[0] if (i[1]=='double' and i[0] not in colLabel) else None).filter(lambda i: i != None).collect()
print(f"Label: {colLabel} \nCategories: {colCat}\nNumerical: {colNum}")
return colCat, colNum | e28e5240bca65bd602234b6560b58d934012f530 | 14,960 |
def scan_usb(device_name=None):
""" Scan for available USB devices
:param device_name: The device name (MX6DQP, MX6SDL, ...) or USB device VID:PID value
:rtype list
"""
if device_name is None:
objs = []
devs = RawHid.enumerate()
for cls in SDP_CLS:
for dev in devs:
for value in cls.DEVICES.values():
if dev.vid == value[0] and dev.pid == value[1]:
objs += [cls(dev)]
return objs
else:
if ':' in device_name:
vid, pid = device_name.split(':')
devs = RawHid.enumerate(int(vid, 0), int(pid, 0))
return [SdpBase(dev) for dev in devs]
else:
for cls in SDP_CLS:
if device_name in cls.DEVICES:
vid = cls.DEVICES[device_name][0]
pid = cls.DEVICES[device_name][1]
devs = RawHid.enumerate(vid, pid)
return [cls(dev) for dev in devs]
return [] | 0178537f65d46b5e1333ef4ee8d590c68d619019 | 14,961 |
def _boolrelextrema(
data, comparator, axis=0, order: tsutils.IntGreaterEqualToOne = 1, mode="clip"
):
"""Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
comparator(data[n],data[n+1:n+order+1]) = True.
Parameters
----------
data: ndarray
comparator: function
function to use to compare two data points. Should take 2 numbers as
arguments
axis: int, optional
axis over which to select from `data`
order: int, optional
How many points on each side to require a `comparator`(n,n+x) = True.
mode: string, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema: ndarray
Indices of the extrema, as boolean array of same shape as data. True
for an extrema, False else.
See Also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0).tolist()
[False, False, True, False, False]
"""
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs)
for shift in range(1, order + 1):
plus = np.take(data, locs + shift, axis=axis, mode=mode)
results &= comparator(main, plus)
minus = np.take(data, locs - shift, axis=axis, mode=mode)
results &= comparator(main, minus)
if ~results.any():
return results
return results | b9315675845b27d77b39e1b0a8facd8cdda955c1 | 14,962 |
from bs4 import BeautifulSoup
def parse_description(offer_markup):
""" Searches for description if offer markup
:param offer_markup: Body from offer page markup
:type offer_markup: str
:return: Description of offer
:rtype: str
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
return html_parser.find(id="textContent").text.replace(" ", "").replace("\n", " ").replace("\r", "").strip() | 30464ca8ac313f4998fb067b46c3ec17e567da50 | 14,963 |
def return_args():
"""Return a parser object."""
_parser = ArgumentParser(add_help=True, description=(
"Translate msgid's from a POT file with Google Translate API"))
_parser.add_argument('-f', '--file', action='store', required=True,
help="Get the POT file name.")
_parser.add_argument('-o', '--output_file', action='store', required=True,
help="Get name to save the new PO file.")
_parser.add_argument('-t', '--translate', action='store', required=True,
help="Get language to translate to.")
_parser.add_argument('-i', '--imprecise', action='store_true',
help="Save translated texts as fuzzy(draft).")
_parser.add_argument('-e', '--error', action='store_true',
help="Print translate errors if exist.")
_parser.add_argument('-p', '--print_process', action='store_true',
help="Print translate process.")
return _parser | 45b608f25cbf9823dcd2dcaa070eaf97daf52895 | 14,964 |
def get_df_tau(plot_dict, gen_err):
"""
Return a dataframe of the kendall tau's coefficient for different methods
"""
# tau, p_value = compute_tau(result_dict[err], plot_dict['avg_clusters'], inverse=True)
# taus, pvalues, names, inverses = [tau], [p_value], ['cc'], ['True']
taus, pvalues, names, inverses = [], [], [], []
for key, value in plot_dict.items():
value = np.array(value)
# if key in ['ranks', 'stable_ranks', 'avg_clusters', 'modularity']:
# continue
for i in range(value.shape[1]):
if key == "Schatten":
if i == 0: # Schatten 1-norm, no inversion
inverse_flag = False
elif i == 1:
continue # skip trivial 2-norm
else:
inverse_flag = True
else:
inverse_flag = True
tau, p_value = compute_tau(gen_err, value[:, i], inverse=inverse_flag)
taus.append(tau)
pvalues.append(p_value)
names.append(key + "_" + str(i + 1))
inverses.append(inverse_flag)
kendal_cor = pd.DataFrame(
{"metric": names, "kendall_tau": taus, "pvalue": pvalues, "inverse": inverses}
)
return kendal_cor | 642af1f675aa1b323f8221cebb81aa98e4a9d188 | 14,965 |
def traverse(graph, priorities):
"""Return a sequence of all the nodes in the graph by greedily choosing high 'priority' nodes
before low 'priority' nodes."""
reachable = PriorityContainer()
visited = {}
# start by greedily choosing the highest-priority node
current_node = max(priorities.items(), key=lambda i: i[1])[0]
visited_count = 0
while current_node:
# visit node
visited[current_node] = visited_count
visited_count += 1
# update visit-able nodes
for neighbor in graph[current_node]['neighbors']:
if neighbor not in reachable and neighbor not in visited:
reachable.put((priorities[neighbor], neighbor))
try:
current_priority, current_node = reachable.get(False)
except Queue.Empty:
current_priority = current_node = None
return visited | 255c14348a1fb7ba33e85ad36537529434ce2865 | 14,966 |
def build_dataset(dataset_name, set_name, root_path, transforms=None):
"""
:param dataset_name: the name of dataset
:param root_path: data is usually located under the root path
:param set_name: "train", "valid", "test"
:param transforms:
:return:
"""
if "cameo_half_year" in dataset_name:
_, data_type, max_length, depth, profile_type = dataset_name.split("-")
max_length = int(max_length)
depth = int(depth)
dataset = CAMEO_HALF_YEAR(root=root_path, data_type=data_type,
transform=transforms, max_length_limit=max_length, depth=depth, profile_type=profile_type)
else:
raise Exception("Can not build unknown image dataset: {}".format(dataset_name))
return dataset | eb7a3090e03c95031f04d6f13f165c02eef8850c | 14,967 |
def remove_characters(text, characters_to_remove=None):
"""
Remove various auxiliary characters from a string.
This function uses a hard-coded string of 'undesirable'
characters (if no such string is provided),
and removes them from the text provided.
Parameters:
-----------
text : str
A piece of text to remove characters from.
characters_to_remove : str
A string of 'undesirable' characters to remove from the text.
Returns:
--------
text : str
A piece of text with undesired characters removed.
"""
# chars = "\\`*_{}[]()<>#+-.!$%@"
if characters_to_remove is None:
characters_to_remove = "\\`*_{}[]()<>#+!$%@"
for c in characters_to_remove:
if c in text:
text = text.replace(c, '')
return text | d2864983bfa3d58c631ff91a8719d45392f4bf42 | 14,968 |
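A quick usage example of the character stripping above, with both the default character set and a custom one:
print(remove_characters("Hello <world>! #2024 (draft)"))       # 'Hello world 2024 draft'
print(remove_characters("50% off", characters_to_remove="%"))   # '50 off'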
def changePrev ( v, pos, findPat, changePat, bodyFlag = 1 ):
"""
changePrev: use string.rfind() to change text in a Leo outline.
v the vnode to start the search.
pos the position within the body text of v to start the search.
findPat the search string.
changePat the replacement string.
bodyFlag true: change body text. false: change headline text.
returns a tuple (v,pos) showing where the change occurred.
returns (None,0) if no further match in the outline was found.
Note: if (v,pos) is a tuple returned previously from changePrev,
changePrev(v,pos-len(findPat),findPat,changePat)
changes the next matching string.
"""
n = len(findPat)
v, pos = findPrev(v, pos, findPat, bodyFlag)
if v == None:
return None, 0
if bodyFlag:
s = v.bodyString()
# s[pos:pos+n] = changePat
s = s[:pos] + changePat + s[pos+n:]
v.setBodyStringOrPane(s)
else:
s = v.headString()
#s[pos:pos+n] = changePat
s = s[:pos] + changePat + s[pos+n:]
v.setHeadStringOrHeadline(s)
return v, pos | 5c45b08b6aba5f7e699e1864e9a44af457b46d17 | 14,969 |
def trait_colors(rows):
"""Make tags for HTML colorizing text."""
backgrounds = defaultdict(lambda: next(BACKGROUNDS))
for row in rows:
for trait in row['traits']:
key = trait['trait']
if key not in ('heading',):
_ = backgrounds[key]
return backgrounds | 851783d8fa5acca3b9c7f1f3ea1e59466f056ad0 | 14,971 |
import json
def webhook():
""" CI with GitHub & PythonAnywhere
Author : Aadi Bajpai
https://medium.com/@aadibajpai/deploying-to-pythonanywhere-via-github-6f967956e664 """
try:
event = request.headers.get('X-GitHub-Event')
# Get payload from GitHub webhook request
payload = request.get_json()
x_hub_signature = request.headers.get('X-Hub-Signature')
# Check if signature is valid
if not github.is_valid_signature(x_hub_signature, request.data):
abort(401)
if event == "ping":
return json.dumps({'msg': 'Ping Successful!'})
if event != "push":
return json.dumps({'msg': "Wrong event type"})
repo = git.Repo(my_directory)
branch = payload['ref'][11:]
# Check that this is a non-staging deployment
if my_directory != "/home/stagingapi/mysite":
if branch != 'master':
return json.dumps({'msg': 'Not master; ignoring'})
repo.git.reset('--hard')
origin = repo.remotes.origin
try:
origin.pull(branch)
utility.write("tests/gitstats.txt",
f'{branch} ,' + str(payload["after"]))
return f'Updated PythonAnywhere successfully with branch: {branch}'
except Exception:
origin.pull('master')
utility.write("tests/gitstats.txt",
f'{branch} ,' + str(payload["after"]))
return 'Updated PythonAnywhere successfully with branch: master'
except Exception as error_message:
return utility.handle_exception(
"Github Update Server", {error_message}) | 998a82897b2aa36dfef6e8125b34964b47218621 | 14,972 |
def pension_drawdown(months, rate, monthly_drawdown, pension_pot):
""" Returns the balance left in the pension pot after drawing an income for the given nr of months """
return monthly_growth(months, rate, -monthly_drawdown, pension_pot) | 2b2d811bbe134eca71d2965de4b06a62a71ccf85 | 14,974 |
def bytesToUInt(bytestring):
"""Unpack 4 byte string to unsigned integer, assuming big-endian byte order"""
return _doConv(bytestring, ">", "I") | f3d645d71b3503b8e5b4b052fe33e839a82c3782 | 14,975 |
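The helper _doConv is defined elsewhere; based on the docstring, the call above should be equivalent to this standalone struct-based version (an assumption, shown only for illustration):
import struct

def bytes_to_uint_equivalent(bytestring):
    # ">I" = big-endian unsigned 32-bit integer
    return struct.unpack(">I", bytestring)[0]

assert bytes_to_uint_equivalent(b"\x00\x00\x01\x00") == 256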
def use(*authenticator_classes):
""" A decorator to attach one or more :class:`Authenticator`'s to the decorated class.
Usage:
from thorium import auth
@auth.use(BasicAuth, CustomAuth)
class MyEngine(Endpoint):
...
OR
@auth.use(BasicAuth)
@auth.use(CustomAuth)
class MyEngine(Endpoint):
...
:param authenticator_classes: One or more :class:`Authenticator` class definitions.
"""
def wrapped(cls):
if not cls._authenticator_classes:
cls._authenticator_classes = []
cls._authenticator_classes.extend(authenticator_classes)
return cls
return wrapped | 27aeb7711c842540a1ed77a76cebeb61e0342f1e | 14,976 |
def list_standard_models():
"""Return a list of all the StandardCellType classes available for this simulator."""
standard_cell_types = [obj for obj in globals().values() if isinstance(obj, type) and issubclass(obj, standardmodels.StandardCellType)]
    for cell_class in list(standard_cell_types):  # iterate over a copy so removal below is safe
        try:
            create(cell_class)
        except Exception as e:
            print("Warning: %s is defined, but produces the following error: %s" % (cell_class.__name__, e))
standard_cell_types.remove(cell_class)
return [obj.__name__ for obj in standard_cell_types] | 7c7d36c5931340ddca5dcad91b34b9e9deb6ef1b | 14,977 |
def AchievableTarget(segments,target,Speed):
"""
The function checks if the car can make the required curvature to reach the target, taking into account its speed
    Returns (id, radius, direction)
    id = 1 -> achievable, else id = 0
    direction = 1 -> right, direction = -1 -> left
"""
Rminamaxlat=Speed**2/parameters.Max_accelerationlateral
Rminepsilonmax=parameters.tsb*Speed**2/(parameters.epsilonmax*pi/180)+parameters.Car_length/(parameters.epsilonmax*pi/180)
Rmin=max(Rminamaxlat,Rminepsilonmax)
Rmax=abs(CurvatureRadius(target))/3
xp=target[0]
yp=target[1]
Ns=len(segments)
    # coefficient
K=0
if xp!=0:
K=yp/xp
#Calculating which way the car will turn
direction=1 #right
if yp<0:
direction=-1 #left
    # If the minimum feasible radius is greater than the maximum allowed, the target is not reachable
if Rmin>Rmax:
return(0,Rmax,direction)
    # Add possible radius values between the minimum and the maximum to the list R
R=[]
Nr=100
i=0
while i<Nr:
R.append(Rmax-i*(Rmax-Rmin)/(Nr-1))
i+=1
    # Check all possible radii
i=0
while i<Nr:
r=R[i]
yc=direction*r
#If the car and the segment are aligned then the arc is a straight line without problems
if yp==0:
return(1,Rmax,1)
if xp!=0:
xinter=(-2*K*yc)/(1+K**2)
yinter=K*xinter
j=0
while (j<Ns and IntersectionArc([xinter,yinter],segments[j])!=1):
j+=1
if j==Ns:
return(1,r,direction)
return(0,r,direction)
xinter=0
yinter=direction*2*r
theta=180
j=0
while (j<Ns and IntersectionArc([xinter,yinter],segments[j])!=1):
j+=1
if j==Ns:
return(1,r,direction)
return(0,r,direction)
i+=1 | 1608847c224b5315cd668d650b52b9a6184c84ac | 14,978 |
import math
def mutual_information(co_oc, oi, oj, n):
"""
:param co_oc: Number of co occurrences of the terms oi and oj in the corpus
:param oi: Number of occurrences of the term oi in the corpus
    :param oj: Number of occurrences of the term oj in the corpus
:param n: Total number of words in the corpus
:return:
"""
e = (oi * oj)/n
return math.log2(co_oc/e) | 76c27295c7e757282573eab71f2bb7cfd3df74cb | 14,980 |
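A small worked example with made-up counts: in a 10,000-word corpus where "new" occurs 100 times, "york" 50 times, and the pair co-occurs 20 times, the expected co-occurrence is 100 * 50 / 10000 = 0.5, so PMI = log2(20 / 0.5) = log2(40) ≈ 5.32.
pmi = mutual_information(co_oc=20, oi=100, oj=50, n=10_000)
print(round(pmi, 2))  # ~5.32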
def is_dark(color: str) -> bool:
"""
    Whether the given color is dark or bright
Taken from https://github.com/ozh/github-colors
"""
    luminance = 0.2126 * int(color[0:2], 16) + 0.7152 * int(color[2:4], 16) + 0.0722 * int(color[4:6], 16)
    return luminance / 255 <= 0.65 | 80fe2c4bd42b20fedff11ef200ae5ca246d4489d | 14,983
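For example, GitHub's Python colour "3572A5" has weighted luminance 0.2126*53 + 0.7152*114 + 0.0722*165 ≈ 105, i.e. roughly 0.41 of full brightness, so it is classed as dark (the colour values are illustrative):
print(is_dark("3572A5"))  # True  (ratio ~0.41)
print(is_dark("ffffff"))  # False (pure white, ratio 1.0)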
from datetime import datetime
def get_date_input_examples(FieldClass) -> list:
"""
Generate examples for a valid input value.
:param FieldClass: InputField
:return: List of input examples.
"""
r = []
for f in FieldClass.input_formats:
now = datetime.now()
r.append(now.strftime(f))
return r | e0b73aac49ac2bbd6423faa3e5e5ebfb81c2d7b7 | 14,984 |
def sve_logistic():
"""SVE of the logistic kernel for Lambda = 42"""
print("Precomputing SVEs for logistic kernel ...")
return {
10: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10)),
42: sparse_ir.compute_sve(sparse_ir.LogisticKernel(42)),
10_000: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10_000))
} | 774365aa9f17c66ea8a3296a08fe1d0972c82ad6 | 14,985 |
def post_team_iteration(id, team, organization=None, project=None, detect=None): # pylint: disable=redefined-builtin
"""Add iteration to a team.
:param id: Identifier of the iteration.
:type: str
:param team: Name or ID of the team.
:type: str
"""
organization, project = resolve_instance_and_project(detect=detect, organization=organization, project=project)
client = get_work_client(organization)
team_context = TeamContext(project=project, team=team)
team_setting_iteration = TeamSettingsIteration(id=id)
try:
team_iteration = client.post_team_iteration(iteration=team_setting_iteration, team_context=team_context)
return team_iteration
except AzureDevOpsServiceError as ex:
_handle_empty_backlog_iteration_id(ex=ex, client=client, team_context=team_context) | 78648eba53e50be7023ac88f9a4ffe2635c74d5b | 14,986 |
import collections
def JoinTypes(types):
"""Combine a list of types into a union type, if needed.
Leaves singular return values alone, or wraps a UnionType around them if there
are multiple ones, or if there are no elements in the list (or only
NothingType) return NothingType.
Arguments:
types: A list of types. This list might contain other UnionTypes. If
so, they are flattened.
Returns:
A type that represents the union of the types passed in. Order is preserved.
"""
queue = collections.deque(types)
seen = set()
new_types = []
while queue:
t = queue.popleft()
if isinstance(t, pytd.UnionType):
queue.extendleft(reversed(t.type_list))
elif isinstance(t, pytd.NothingType):
pass
elif t not in seen:
new_types.append(t)
seen.add(t)
if len(new_types) == 1:
return new_types.pop()
elif any(isinstance(t, pytd.AnythingType) for t in new_types):
return pytd.AnythingType()
elif new_types:
return pytd.UnionType(tuple(new_types)) # tuple() to make unions hashable
else:
return pytd.NothingType() | 0d43551a2882fa75a1827302811670fefe19433c | 14,987 |
import numpy as np
def calc_nominal_strike(traces: np.ndarray):
"""
Gets the start and ending trace of the fault and ensures order for largest lon value first
Parameters
----------
traces: np.ndarray
Array of traces of points across a fault with the format [[lon, lat, depth],...]
"""
# Extract just lat and lon for the start and end of the traces
trace_start, trace_end = [traces[0][0], traces[0][1]], [
traces[-1][0],
traces[-1][1],
]
# Ensures correct order
if trace_start[0] < trace_end[0]:
return np.asarray([trace_end]), np.asarray([trace_start])
else:
return np.asarray([trace_start]), np.asarray([trace_end]) | 21c5c2de8c136ac44cbea401dce79c84007fc4ac | 14,988 |
def merge_options(custom_options, **default_options):
"""
Utility function to merge some default options with a dictionary of custom_options.
Example: custom_options = dict(a=5, b=3)
merge_options(custom_options, a=1, c=4)
--> results in {a: 5, b: 3, c: 4}
"""
merged_options = default_options
merged_options.update(custom_options)
return merged_options | a1676c9304f3c231aefaeb107c8fb6f5a8251b26 | 14,989 |
def build_wall(game: Board, player: Player) -> float:
"""
    Encourage the player to move toward the middle row and column of the board
to increase the chances of a partition in the later game
"""
position = game.get_player_location(player)
blanks = game.get_blank_spaces()
    blank_vertical = [loc for loc in blanks
                      if loc[1] == 3]
    blank_horizontal = [loc for loc in blanks
                        if loc[0] == 3]
vertical = len(blank_vertical)
horizontal = len(blank_horizontal)
if position == (3, 3):
return max(vertical, horizontal)
elif position[0] == 3:
return horizontal
elif position[1] == 3:
return vertical
else:
return 0 | 9309e152b704317442e646d2283c0b20041c55b9 | 14,990 |
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from bs4 import BeautifulSoup
def get_menu_from_hzu_navigation():
    """
    Fetch the HTML of the navigation bar on the Huizhou University (hzu.edu.cn) homepage.
    :return: the navigation ul tag as a BeautifulSoup element
    """
try:
html = urlopen("https://www.hzu.edu.cn/")
except HTTPError as e:
print(e)
print('The page is not exist or have a error in getting page.')
return None
except URLError as e:
print(e)
print("url is wrong or the url couldn't open.")
return None
try:
bs = BeautifulSoup(html.read(), 'html.parser')
return bs.find(id='naver').find('ul', {'class': {'wp-menu'}})
except AttributeError as e:
print(e)
        print('A tag element does not exist, or a bad url (server does not exist) caused html.read() to fail')
return None | 1e0fab3402aeaca8bce3a93787f98a9360ffe49f | 14,991 |
from decimal import Decimal
def calc_user_withdraw_fee(user_id, amount):
    """Withdrawal fee policy: the first withdrawal of the day is free, later ones cost 2.00."""
withdraw_logs = dba.query_user_withdraw_logs(user_id, api_x.utils.times.utctoday())
if len(withdraw_logs) > 0:
return Decimal('2.00')
return Decimal('0.00') | a9d28ad6c3cb2cf801ac8fdcf67e3f9d2c804a67 | 14,992 |
def get_last_row(dbconn, tablename, n=1, uuid=None):
"""
Returns the last `n` rows in the table
"""
return fetch(dbconn, tablename, n, uuid, end=True) | 0c70b6fca976b4f97fb816279653e0c2bbd67d5c | 14,993 |
from typing import Optional
def get_start(period, reference_date: Optional[FlexDate] = None, strfdate="%Y-%m-%d") -> FlexDate:
"""
Returns the first day of the given period for the reference_date.
Period can be one of the following: {'year', 'quarter', 'month', 'week'}
If reference_date is instance of str, returns a string.
If reference_date is instance of datetime.date, returns a datetime.date instance.
If reference_date is instance of SmartDate, returns a SmartDate instance.
If no reference_date given, returns a SmartDate instance.
Examples
--------
>>> # when no reference is given assume that it is datetime.date(2018, 5, 8)
>>> get_start('month')
SmartDate(2018, 5, 1)
>>> get_start('quarter', '2017-05-15')
'2017-04-01'
>>> get_start('year', datetime.date(2017, 12, 12))
datetime.date(2017, 01, 01)
"""
start_functions = {
"decade": _get_decade_start,
"year": _get_year_start,
"quarter": _get_quarter_start,
"month": _get_month_start,
"fortnight": _get_fortnight_start,
"week": _get_week_start,
"day": _get_day_start,
"decades": _get_decade_start,
"years": _get_year_start,
"quarters": _get_quarter_start,
"months": _get_month_start,
"fortnights": _get_fortnight_start,
"weeks": _get_week_start,
"days": _get_day_start,
}
return start_functions[period](reference_date or SmartDate.today(), strfdate) | 53016712c6949291fe2e4e81de0ae993da4311c1 | 14,994 |
import numpy as np
def prepare_lc_df(star_index, frame_info, magmatch, magx):
"""Prepare cleaned light curve data
Add mag, mag_err, magx, and magx_err to info
Remove nan values or too bright values in magx
Args:
star_index (int): index of the star
frame_info (DataFrame): info data
magmatch (array): raw photometry array
magx (array): corrected photometry array
Returns:
lc (array): light curve data
"""
lc = frame_info.copy()
lc = lc.assign(mag=magmatch[star_index, :, 0])
lc = lc.assign(mag_err=magmatch[star_index, :, 1])
lc = lc.assign(magx=magx[star_index, :, 0])
lc = lc.assign(magx_err=magx[star_index, :, 1])
lc = lc[~np.isnan(lc.magx) & (lc.magx > 1)]
return lc | b99123fb5bd0b84a84576791d578b5ae91f05575 | 14,995 |
def _filter_nones(centers_list):
"""
Filters out `None` from input list
Parameters
----------
centers_list : list
List potentially containing `None` elements
Returns
-------
new_list : list
List without any `None` elements
"""
return [c for c in centers_list if c is not None] | 031e878ebc8028deea238f5ac902ca55dba72a6d | 14,996 |
import multiprocessing
import time
def exec_in_subprocess(func, *args, poll_interval=0.01, timeout=None, **kwargs):
""" Execute a function in a fork
Args:
func (:obj:`types.FunctionType`): function
* args (:obj:`list`): list of positional arguments for the function
poll_interval (:obj:`float`, optional): interval to poll the status of the subprocess
timeout (:obj:`float`, optional): maximum execution time in seconds
**kwargs (:obj:`dict`, optional): dictionary of keyword arguments for the function
Returns:
:obj:`object`: result of the function
"""
context_instance = multiprocessing.get_context('fork')
queue = context_instance.Queue()
process = Process(target=subprocess_target, args=[queue, func] + list(args), kwargs=kwargs)
process.start()
start_time = time.time()
while process.exception is None:
time.sleep(poll_interval)
if timeout is not None and (time.time() - start_time) > timeout:
raise TimeoutError('Execution did not complete in {} s.'.format(timeout))
if process.exception:
raise process.exception
results = queue.get()
return results | b04b506ced8f5e90489dd789ebd9f77fd4487d8a | 14,998 |
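Assuming the helper Process class and subprocess_target referenced above are available, usage might look like this (the function and arguments are illustrative):
import time

def slow_square(x, delay=0.1):
    time.sleep(delay)
    return x * x

result = exec_in_subprocess(slow_square, 7, timeout=5, delay=0.05)
print(result)  # 49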
import pandas as pd
def get_cv_score_table(clf):
"""
Get a table (DataFrame) of CV parameters and scores for each combination.
:param clf: Cross-validation object (GridSearchCV)
:return:
"""
# Create data frame
df = pd.DataFrame(list(clf.cv_results_['params']))
# Add test scores
df['rank'] = clf.cv_results_['rank_test_score']
df['test_mean'] = clf.cv_results_['mean_test_score']
df['test_sd'] = clf.cv_results_['std_test_score']
# Add scores over training data
df['train_mean'] = clf.cv_results_['mean_train_score']
df['train_sd'] = clf.cv_results_['std_train_score']
# Add time metrics (s)
df['fit_time_mean'] = clf.cv_results_['mean_fit_time']
df['fit_time_sd'] = clf.cv_results_['std_fit_time']
df['score_time_mean'] = clf.cv_results_['mean_score_time']
df['score_time_sd'] = clf.cv_results_['std_score_time']
return df | e1912b6545b0a6649fa66673d2fdd5dfd2b91cd5 | 14,999 |
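A usage sketch with scikit-learn; note that mean_train_score/std_train_score are only present when the search is run with return_train_score=True:
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
search = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, cv=5, return_train_score=True)
search.fit(X, y)
print(get_cv_score_table(search).sort_values("rank"))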
from functools import wraps
from flask import jsonify, make_response
def model_handle_check(model_type):
"""
Checks for the model_type and model_handle on the api function,
model_type is a argument to this decorator, it steals model_handle and checks if it is
present in the MODEL_REGISTER
the api must have model_handle in it
Args:
model_type: the "type" of the model, as specified in the MODEL_REGISTER
Returns:
wrapped api function
"""
def decorator(api_func):
@wraps(api_func)
def wrapper(*args, model_handle, **kwargs):
if model_handle not in MODEL_REGISTER:
return make_response(
jsonify(
{"error": f"{model_handle} not found in registered models"}
),
404,
)
if (
model_handle in MODEL_REGISTER
and MODEL_REGISTER[model_handle]["type"] != model_type
):
return make_response(
jsonify({"error": f"{model_handle} model is not an {model_type}"}),
412,
)
return api_func(*args, model_handle=model_handle, **kwargs)
return wrapper
return decorator | 1c2bab3399dff743fd1ca1a37971a4e71f5d5b8f | 15,000 |
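A hypothetical Flask route using the decorator; the "classifier" type, the registry layout with a "model" key, and the request payload shape are assumptions for illustration, and MODEL_REGISTER is expected to be defined elsewhere in the module:
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/models/<model_handle>/predict", methods=["POST"])
@model_handle_check("classifier")
def predict(model_handle):
    # assumed registry layout: MODEL_REGISTER[handle] = {"type": ..., "model": ...}
    model = MODEL_REGISTER[model_handle]["model"]
    return jsonify({"prediction": model.predict([request.json["input"]]).tolist()})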
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
def train_model_mixed_data(type_tweet, split_index, custom_tweet_data=pd.Series([]), stop_words="english"):
"""
Fits the data on a Bayes model. Modified train_model() with custom splitting of data.
:param type_tweet:
:param split_index:
:param custom_tweet_data: if provided, this is used instead of test data for prediction
:param stop_words:
:return: training_data, testing_data , label_train, label_test
"""
data_train = type_tweet['tweet'][:split_index]
label_train = type_tweet['class'][:split_index]
data_test = type_tweet['tweet'][split_index:]
label_test = type_tweet['class'][split_index:]
#probably better to not remove any stopwords
count_vector = CountVectorizer(stop_words=[])
# Fit training data and return a matrix
training_data = count_vector.fit_transform(data_train)
# Transform testing data and return a matrix.
if not custom_tweet_data.empty:
testing_data = count_vector.transform(custom_tweet_data)
else:
testing_data = count_vector.transform(data_test)
return training_data, testing_data , label_train, label_test | 4c7d4e29562b63ea53f1832af0841fb112c6596a | 15,001 |
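The returned matrices are typically fed to a classifier; a sketch with scikit-learn's MultinomialNB, assuming type_tweet is a DataFrame with 'tweet' and 'class' columns and an illustrative split index:
from sklearn.naive_bayes import MultinomialNB

training_data, testing_data, label_train, label_test = train_model_mixed_data(type_tweet, split_index=8000)
nb = MultinomialNB()
nb.fit(training_data, label_train)
print("accuracy:", nb.score(testing_data, label_test))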
import numpy as np
import scipy.optimize
def _fit_curves(ns, ts):
"""Fit different functional forms of curves to the times.
Parameters:
ns: the value of n for each invocation
ts: the measured run time, as a (len(ns), reps) shape array
Returns:
scores: normalised scores for each function
coeffs: coefficients for each function
names: names of each function
fns: the callable for each function in turn.
"""
# compute stats
med_times = np.median(ts, axis=1)
# fit and score complexities
scores = []
coeffs = []
names = []
fns = []
ns = np.array(ns)
ts = np.array(med_times)
for c_name, c_fn in complexities.items():
res = scipy.optimize.minimize_scalar(
complexity_fit, bracket=[1e-5, 1e5], args=(c_fn, ns, ts)
)
scores.append(res.fun)
coeffs.append(res.x)
names.append(c_name)
fns.append(c_fn)
scores = 1.0 / np.sqrt(np.array(scores))
tot_score = np.sum(scores)
scores = scores / tot_score
return scores, coeffs, names, fns | 9a480869d930e27d9aa988455228e6197f87417a | 15,002 |
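The complexities mapping and complexity_fit objective are defined elsewhere; they presumably look something like this sketch (coefficient-scaled model curves scored by squared error against the measured times):
complexities = {
    "O(1)":       lambda n: np.ones_like(n, dtype=float),
    "O(log n)":   np.log,
    "O(n)":       lambda n: n.astype(float),
    "O(n log n)": lambda n: n * np.log(n),
    "O(n^2)":     lambda n: n.astype(float) ** 2,
}

def complexity_fit(coeff, c_fn, ns, ts):
    # squared error between the scaled model curve and the measured times
    return np.sum((coeff * c_fn(ns) - ts) ** 2)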
def isolate_integers(string):
"""Isolate positive integers from a string, returns as a list of integers."""
return [int(s) for s in string.split() if s.isdigit()] | cc95f7a37e3ae258ffaa54ec59f4630c600e84e1 | 15,003 |
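For example (only whitespace-separated tokens made entirely of digits are kept, so negatives and tokens with attached punctuation are ignored):
print(isolate_integers("ran 3 jobs in 42 s, -1 failed"))  # [3, 42]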
def extractAFlappyTeddyBird(item):
"""
# A Flappy Teddy Bird
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'The Black Knight who was stronger than even the Hero' in item['title']:
return buildReleaseMessageWithType(item, 'The Black Knight Who Was Stronger than Even the Hero', vol, chp, frag=frag, postfix=postfix)
return False | ca382caa9d1d9244424a39d1bc43c141b003691d | 15,004 |
def get_trainable_vars(name):
"""
returns the trainable variables
:param name: (str) the scope
:return: ([TensorFlow Variable])
"""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name) | c45b075c739e8c86d6f1dadc0b1f4eacfb1d1505 | 15,005 |