content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def dot(p, q):
"""
Compute dot product between two 3D vectors
p: array
Cartesian coordinates for one of the vectors
q: array
Cartesian coordinates for one of the vectors
"""
return p[0] * q[0] + p[1] * q[1] + p[2] * q[2]
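# A quick illustrative check of dot() on small hand-computed vectors:
# >>> dot([1, 2, 3], [4, 5, 6])
# 32
# >>> dot([1, 0, 0], [0, 1, 0])
# 0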
|
28a073690e1e89128a997ae75b8782ee0cfb7252
| 19,600 |
import os
import sys
import tempfile
from copy import deepcopy
def create(protocol, host, port, objname):
"""
Sets up an environment through which the program can be re-invoked by
itself. In this new environment, the output will be the username and
password stored in the keyring for the specified server.
Refer to `credentials.keyring` for what the arguments mean.
"""
# Set up an environment in which 'git fetch' will load username and
# password from this script, not prompt the user in terminal.
env = deepcopy(os.environ)
env['GIT_ASKPASS'] = sys.argv[0] # Use the entry point of the script.
env['SR_ASKPASS'] = '1'
env['SR_ASKPASS_PROTOCOL'] = protocol if protocol else ''
env['SR_ASKPASS_SERVER'] = host if host else ''
env['SR_ASKPASS_PORT'] = str(port) if port else '0'
env['SR_ASKPASS_OBJECT'] = objname if objname else ''
handle, filepath = tempfile.mkstemp()
os.write(handle, 'U'.encode('ascii'))
os.close(handle)
env['SR_ASKPASS_TEMP'] = filepath
return env
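# Illustrative usage sketch (assumes this module is the script referenced by
# sys.argv[0], so git can call it back through GIT_ASKPASS):
# >>> import subprocess
# >>> env = create('https', 'example.com', 443, 'myrepo')
# >>> subprocess.check_call(['git', 'fetch', 'origin'], env=env)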
|
a4e06f82338ec9e7445f5319b759f46cbab2f09e
| 19,601 |
import click_completion.core
import click
def install_completion(ctx, attr, value): # pragma: no cover
"""Install completion for the current shell."""
if not value or ctx.resilient_parsing:
return value
shell, path = click_completion.core.install()
click.secho("{0} completion installed in {1}".format(shell, path), fg="green")
ctx.exit()
|
b6c84744161d90cc1d33ac6effd5b7aec083c151
| 19,602 |
import datetime
import pandas as pd
def _extractSetsSingleUser(df, time_window):
"""Get activity set and trip set for each individual."""
# total weeks and start week
weeks = (df["endt"].max() - df["startt"].min()).days // 7
start_date = df["startt"].min().date()
aSet = pd.DataFrame([], columns=["userid", "locid", "dur_s", "class", "timeStep"])
tSet = pd.DataFrame([], columns=["userid", "tripid", "length_m", "dur_s", "nloc", "class", "timeStep"])
# construct the sliding week gdf, i is the timestep
for i in range(0, weeks - time_window + 1):
# start and end time
curr_start = datetime.datetime.combine(start_date + datetime.timedelta(weeks=i), datetime.time())
curr_end = datetime.datetime.combine(curr_start + datetime.timedelta(weeks=time_window), datetime.time())
## determine activity set locations
# get the current time step points gdf
curr_stps = df.loc[(df["startt"] >= curr_start) & (df["endt"] < curr_end) & (df["type"] == "points")]
# extract the activity set (location)
curr_ASet = curr_stps.groupby("locid", as_index=False).apply(_getActLocs, time_window=time_window).dropna()
# if no location, jump to next time step
if curr_ASet.empty:
continue
# result is the locations with stayed duration class
curr_ASet["timeStep"] = i
aSet = aSet.append(curr_ASet)
## determine activity set trips
# select activity set location
curr_ASet = curr_ASet.loc[curr_ASet["class"] > 0]
# get the current time step trips gdf
curr_t = df.loc[(df["startt"] >= curr_start) & (df["endt"] < curr_end) & (df["type"] == "trips")]
curr_tSet = _getCurrTrips(curr_t, curr_stps, curr_ASet)
# result is the trips that end at activity set locations
curr_tSet["timeStep"] = i
tSet = tSet.append(curr_tSet)
# clean up
aSet = aSet.reset_index(drop=True)
tSet = tSet.reset_index(drop=True)
aSet["type"] = "points"
tSet["type"] = "trips"
aSet["userid"] = df["userid"].unique()[0]
tSet["userid"] = df["userid"].unique()[0]
return aSet.append(tSet)
|
40f92857cd5684b8fbf8b01565ede7aeffab1fe8
| 19,603 |
def _ed25519():
"""Edwards curve Ed25519.
Link: https://en.wikipedia.org/wiki/EdDSA#Ed25519
"""
q = 2 ** 255 - 19
order = 2 ** 252 + 27742317777372353535851937790883648493
gf = GF(q)
ed = CurveParams(name="ED25519", order=order, gf=gf, is_cyclic = True)
ed.set_constants(a=gf(-1), d=gf(-121665) / gf(121666))
ed.set_equation(set_edwards_eq(a=ed.a, c=ed.c, d=ed.d))
ed.set_base_pt(
(
gf(
15112221349535400772501151409588531511454012693041857206046113283949847762202
),
gf(4) / gf(5),
)
)
return ed
|
f1a07b9ebcb6033968f0e7d9c66ee2ff71f138e0
| 19,604 |
import json
def from_raw_bytes(raw_bytes):
"""Take raw bytes and turn it into a DmailRequest"""
return from_json(json.loads(raw_bytes.decode(encoding='UTF-8')))
|
01e989dfddcad20125ff608cdfa42673b1c0d0d8
| 19,605 |
def BRepApprox_TheMultiLineToolOfApprox_FirstPoint(*args):
"""
:param ML:
:type ML: BRepApprox_TheMultiLineOfApprox &
:rtype: int
"""
return _BRepApprox.BRepApprox_TheMultiLineToolOfApprox_FirstPoint(*args)
|
26b8a2bffe094a8cc1e41edf56b92b75be75dc37
| 19,606 |
import time
def push_message(token, user, message, **kwargs):
"""
Send message to selected user/group/device.
:param str token: application token
:param str user: user or group id to send the message to
:param str message: your message
:param str title: your message's title, otherwise your app's name is used
:param str device: your user's device name to send the message directly to that device
:param list device: your user's devices names to send the message directly to that device
:param str url: a supplementary URL to show with your message
:param str url_title: a title for your supplementary URL, otherwise just the URL is shown
:param int priority: message priority (Use the Priority class to select)
:param int retry: how often (in seconds) the Pushover servers will retry the notification to the user (required
only with priority level of Emergency)
:param int expire: how many seconds your notification will continue to be retried (required only with priority
level of Emergency)
:param datetime timestamp: a datetime object repr the timestamp of your message's date and time to display to the user
:param str sound: the name of the sound to override the user's default sound choice (Use the Sounds consts to
select)
:param bool html: Enable rendering message on user device using HTML
"""
data_out = {
'token': token,
'user': user, # can be a user or group key
'message': message
}
# Support for non-required parameters of PushOver
if 'title' in kwargs:
data_out['title'] = kwargs['title']
if 'device' in kwargs:
temp = kwargs['device']
if type(temp) == list:
data_out['device'] = ','.join(temp)
else:
data_out['device'] = temp
if 'url' in kwargs:
data_out['url'] = kwargs['url']
if 'url_title' in kwargs:
data_out['url_title'] = kwargs['url_title']
if 'priority' in kwargs:
data_out['priority'] = kwargs['priority']
# Emergency prioritized messages require 'retry' and 'expire' to be defined
if data_out['priority'] == PRIORITIES.EMERGENCY:
if 'retry' not in kwargs:
raise TypeError('Missing `retry` argument required for message priority of Emergency')
else:
retry_val = kwargs['retry']
# 'retry' val must be a minimum of _MIN_RETRY and max of _MAX_EXPIRE
if not (_MIN_RETRY <= retry_val <= _MAX_EXPIRE):
raise ValueError('`retry` argument must be at a minimum of {} and a maximum of {}'.format(
_MIN_RETRY, _MAX_EXPIRE
))
data_out['retry'] = retry_val
if 'expire' not in kwargs:
raise TypeError('Missing `expire` argument required for message priority of Emergency')
else:
expire_val = kwargs['expire']
# 'expire' val must be a minimum of _MIN_RETRY and max of _MAX_EXPIRE
if not(_MIN_RETRY <= expire_val <= _MAX_EXPIRE):
raise ValueError('`expire` argument must be at a minimum of {} and a maximum of {}'.format(
_MIN_RETRY, _MAX_EXPIRE
))
data_out['expire'] = expire_val
# Optionally a callback url may be supplied for the Emergency Message
if 'callback' in kwargs:
data_out['callback'] = kwargs['callback']
if 'timestamp' in kwargs:
data_out['timestamp'] = int(time.mktime(kwargs['timestamp'].timetuple()))
if 'sound' in kwargs:
data_out['sound'] = kwargs['sound']
if 'html' in kwargs:
data_out['html'] = int(kwargs['html'])
return send(_push_url, data_out=data_out)
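# Illustrative call; the token/user values are placeholders and PRIORITIES is the
# module-level constants object. Emergency-priority messages must also supply
# retry and expire:
# >>> push_message('<app-token>', '<user-key>', 'Disk almost full',
# ...              title='Alert', priority=PRIORITIES.EMERGENCY,
# ...              retry=60, expire=3600)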
|
97b278482fb1ff88eea5f95c45f47563b61c905f
| 19,607 |
def _rrv_div_ ( s , o ) :
"""Division of RooRealVar and ``number''
>>> var = ...
>>> num = ...
>>> res = var / num
"""
if isinstance ( o , _RRV_ ) and not o.isConstant() : o = o.ve ()
elif hasattr ( o , 'getVal' ) : o = o.getVal ()
#
v = s.getVal() if s.isConstant() else s.ve()
#
return v / o
|
bef100fa354dc1e090a7e1cb2ad66bc8c7144d1b
| 19,608 |
def getWindowsAt(x: int, y: int, app: AppKit.NSApplication = None, allWindows=None):
"""
Get the list of Window objects whose windows contain the point ``(x, y)`` on screen
:param x: X screen coordinate of the window(s)
:param y: Y screen coordinate of the window(s)
:param app: (optional) NSApp() object. If passed, returns the list of windows at (x, y) position of given app
:param allWindows: (optional) list of window objects (required to improve performance in Apple Script version)
:return: list of Window objects
"""
matches = []
if not allWindows:
allWindows = getAllWindows(app)
for win in allWindows:
box = win.box
if pointInRect(x, y, box.left, box.top, box.width, box.height):
matches.append(win)
return matches
|
009a4d439e7948fc3829e132118716ea808b5185
| 19,609 |
import numpy as np
from scipy.spatial.distance import cdist
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some given coordinates.
The distance metric used is Euclidean distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0)
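# Small made-up example: the query point (0.9, 0, 0) is nearest to the
# second surface vertex, so the returned index array is [1]:
# >>> surface = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
# >>> find_closest_vertices(surface, [0.9, 0., 0.])
# array([1])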
|
62adc1082d24ff70e8f285a6a457b6bff0768854
| 19,610 |
def blob_utils_get_loss_gradients(model, loss_blobs):
"""Generate a gradient of 1 for each loss specified in 'loss_blobs'"""
loss_gradients = {}
for b in loss_blobs:
loss_grad = model.net.ConstantFill(b, [b + '_grad'], value=1.0)
loss_gradients[str(b)] = str(loss_grad)
return loss_gradients
|
2543dc532469405ad0ae1b1288b9956841565238
| 19,611 |
def _list_indexing(X, key, key_dtype):
""" Index a Python list """
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return X[key]
if key_dtype == 'bool':
# key is a boolean array-like
return list(compress(X, key))
# key is an integer array-like of keys
return [X[idx] for idx in key]
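# Illustrative behaviour for the three supported key kinds (key_dtype mirrors
# what the caller detected for the key):
# >>> X = ['a', 'b', 'c']
# >>> _list_indexing(X, 1, key_dtype='int')
# 'b'
# >>> _list_indexing(X, [True, False, True], key_dtype='bool')
# ['a', 'c']
# >>> _list_indexing(X, [0, 2], key_dtype='int')
# ['a', 'c']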
|
47a5ae6be9db172c5ac194c7989540c79a27f89f
| 19,612 |
import collections
import itertools
import copy
from operator import itemgetter
import numpy as np
def extract_hubs_from_motifs(list_of_motifs: list,
genes_to_remove: list,
check_conflict: bool = True,
debug: bool = False,
gene_ids_file: str = None,
top_pc: float = 1.):
"""
Parameters
==========
list_of_motifs: list of lists of all the motifs found.
Each item in the main list is a list of length 2: ['Motif_name', weight]
genes_to_remove: if there are genes to be removed due to ID conflicts or discrepancies
check_conflict: if True, check the overlap between the hubs and the list provided
debug: if printing debug options
gene_ids_file: file with gene_IDs that might generate conflict
top_pc: top % of genes to consider
Returns
=======
hubs: list of all the genes perturbed with their connectivity, and hubs
"""
genes_p = collections.Counter(list(itertools.chain(*[m.split("_") for m in list_of_motifs])))
# this file corrects the list for discrepancies in the list for gene IDs
path_tofile = gene_ids_file
fin = open(path_tofile, mode='r')
genelist = fin.readlines()
genelist = [x.strip() for x in genelist]
fin.close()
if debug:
print("Find top {}% of hubs".format(top_pc))
temp_genes = copy.deepcopy(genes_p)
for g in genes_p:
if g in genes_to_remove and g not in genelist:
if debug:
print("Removing gene for conflict: {}".format(g))
temp_genes.pop(g)
if debug:
print("Hubs before: {} and after: {}".format(len(genes_p), len(temp_genes)))
list_of_genes = sorted(list(map(list, list(temp_genes.items()))), key=itemgetter(1), reverse=True)
if check_conflict:
top_pc = 100-top_pc
top_pc_genes = list(np.array(list_of_genes).T[0][:top_pc])
notpert_hubs = list(set(top_pc_genes) - set(genelist))
nothubs_pert = list(set(genelist) - set(top_pc_genes))
for idx, gene in enumerate(notpert_hubs):
temp1 = [gene, temp_genes[gene]]
temp_genes.pop(gene)
temp2 = [nothubs_pert[idx], temp_genes[nothubs_pert[idx]]]
temp_genes.pop(nothubs_pert[idx])
temp_genes[temp2[0]] = temp1[1]
temp_genes[temp1[0]] = temp2[1]
temp_genes = collections.OrderedDict(sorted(temp_genes.items(), key=itemgetter(1), reverse=True))
full_genes = sorted(list(map(list, list(temp_genes.items()))), key=itemgetter(1), reverse=True)
connectivity = np.array(full_genes).T[1].astype(np.float16)
hubs = list(np.array(list(temp_genes.keys()))[np.where(connectivity > np.percentile(connectivity, top_pc))[0]])
return full_genes, hubs
|
3dc4bc8c8dd67ce0eb88d0f02b0021e575741503
| 19,613 |
import json
import requests
def credit_rating():
"""
credit_rating http api
"""
return_dict = {'rescode': '200', 'credit-rating': '1', 'description': 'Good credit'}
if request.get_data() is None:
return_dict['rescode'] = '5004'
return json.dumps(return_dict, ensure_ascii=False)
role_dict = {'farmer': 1, 'consumer': 2}
sex_dict = {'male': 1, 'female': 2}
location_dict = {'Cantwell city, Alaska, USA': 1, 'Queens, New York, NY, USA': 2}
description_dict = {'0': 'Bad credit', '1': 'Good credit'}
get_data = request.get_data()
get_data = json.loads(get_data)
role_name = get_data.get('rolename')
sex = get_data.get('sex')
user_name = get_data.get('username')
location = get_data.get('location')
carbon_credit = get_data.get('carbon_credit')
footprint_names = get_data.get('footprint_name')
carbon_credit = int(carbon_credit)
footprint_count_dict = {'Buy': 0, 'Fertilize': 0, 'Seed': 0}
for ftn in footprint_names:
if ftn.startswith('Buy'):
footprint_count_dict['Buy'] = footprint_count_dict['Buy'] + 1
elif ftn.startswith('Fertilize'):
footprint_count_dict['Fertilize'] = footprint_count_dict['Fertilize'] + 1
elif ftn.startswith('Seed'):
footprint_count_dict['Seed'] = footprint_count_dict['Seed'] + 1
x_predict_json = {
'x0': sex_dict.get(sex),
'x1': role_dict.get(role_name),
'x2': location_dict.get(location),
'x3': carbon_credit,
'x4': footprint_count_dict['Seed'],
'x5': footprint_count_dict['Buy'],
'x6': footprint_count_dict['Fertilize']
}
value_dict = {'max_x0': 2, 'min_x0': 1, 'max_x1': 2, 'min_x1': 1, 'max_x2': 2, 'min_x2': 1, 'max_x3': 99,
'min_x3': 0, 'max_x4': 30, 'min_x4': 0, 'max_x5': 30, 'min_x5': 0, 'max_x6': 30, 'min_x6': 0}
for i in range(7):
x_predict_json['x' + str(i)] = normalization(x_predict_json['x' + str(i)], value_dict['max_x' + str(i)],
value_dict['min_x' + str(i)])
body_json = {
"head": {
"serviceId": "cfc"
},
"body": {
'featureData': x_predict_json,
'sendToRemoteFeatureData': {
'device_id': user_name
}
}
}
# guest node ip
response = requests.post(
'http://IP:8059/federation/v1/inference',
data=json.dumps(body_json))
response_data = json.loads(response.text).get('data')
prob = response_data.get('prob')
flag = "0"
if float(prob) > 0.4:
flag = "1"
return_dict['credit-rating'] = flag
return_dict['description'] = description_dict[flag]
return json.dumps(return_dict, ensure_ascii=False)
|
06a12b6f0801a1b56b17eb53ec4009c4ab5777f5
| 19,614 |
def updateGlobalInventory(D_SKUs: pd.DataFrame, inventoryColumn: str):
"""
Update the global inventory of the warehouse
Args:
D_SKUs (pd.DataFrame): Input SKUs dataframe.
inventoryColumn (str): column name with the inventory.
Returns:
D_inventory (pd.DataFrame): Output DataFrame with inventory values.
"""
D_inventory = pd.DataFrame([], columns=['WH_INVENTORY_VOLUME', 'WH_INVENTORY_NORMALISED'])
givenVolumes = 0 # count the number of SKUs with a given volume
for i in range(0, len(D_SKUs)):
# i=33159
volume = D_SKUs.iloc[i]['VOLUME']
list_days = D_SKUs.iloc[i]['INVENTORY_DAYS']
# go on only if an inventory has been saved
if isinstance(list_days, list):
list_inventory = np.array(D_SKUs.iloc[i][inventoryColumn])
list_inventory = np.nan_to_num(list_inventory) # convert nan to 0
list_inventory_volume = list_inventory * volume
list_inventory_normalised = (list_inventory - min(list_inventory)) / (max(list_inventory) - min(list_inventory))
D_temp = pd.DataFrame(list_inventory_normalised, index=list_days, columns=['SKU_INVENTORY_NORMALISED'])
D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
D_inventory = D_inventory.fillna(0)
D_inventory['WH_INVENTORY_NORMALISED'] = D_inventory['WH_INVENTORY_NORMALISED'] + D_inventory['SKU_INVENTORY_NORMALISED']
D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_NORMALISED'])
if str(volume) != 'nan': # if volume is not nan
D_temp = pd.DataFrame(list_inventory_volume, index=list_days, columns=['SKU_INVENTORY_VOLUME'])
D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
D_inventory = D_inventory.fillna(0)
D_inventory['WH_INVENTORY_VOLUME'] = D_inventory['WH_INVENTORY_VOLUME'] + D_inventory['SKU_INVENTORY_VOLUME']
D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_VOLUME'])
givenVolumes = givenVolumes + 1
return D_inventory
|
7e9d824de8830b40a88ae5fbbffac89e69a57869
| 19,615 |
import datetime
def _query_checks(start, end, owner_id=''):
"""Get the number of rules checks from `start` to `end` in 1-day windows"""
series = []
assert (isinstance(end, datetime.datetime) and
isinstance(start, datetime.datetime))
while start < end:
stop = start + datetime.timedelta(days=1)
results = _query_influxdb(
_get_checks_or_datapoints_query('checks',
start, stop, owner_id), owner_id
)
series.append(('%sZ' % start.isoformat(), results))
start += datetime.timedelta(days=1)
return _parse_checks_or_datapoints_series(series, 'checks', owner_id)
|
9df5e7dd9a6ea2f2bb1f4f1a6a89db6b16d6814b
| 19,616 |
def _FilterManufacturedEvents(results):
"""Return a list of results where first question is 'MANUFACTURED'.
Manufactured events are either Recording events that correspond to
an instrumented event in the browser, or Showed notification events
that correspond to when the user was invited to take a survey.
Args:
results: Results parsed from JSON. Assumed to already be filtered by date.
Returns:
(1) List of results that are manufactured events.
(2) Integer index into the results list indicating which list
element's questions can be considered canonical and complete.
"""
manuf_events = [
r for r in results
if r['responses'][0]['question'] == 'MANUFACTURED']
return manuf_events, _GetCanonicalIndex(manuf_events)
|
51fb34402e17249b63b8061bf70a2879925c8fba
| 19,617 |
def Max(data):
"""Returns the maximum value of a time series"""
return data.max()
|
0d4781da4384eae65de4e13860995848ae8de678
| 19,618 |
from nltk.corpus import stopwords
from nltk.stem import RSLPStemmer
def clean_words(words, remove_stopwords=False, language='portuguese'):
"""Stems and removes stopwords from a set of word-level tokens using the RSLPStemmer.
Args:
words (list): Tokens to be stemmed.
remove_stopwords (bool): Whether stopwords should be removed or not.
language (str): Identifier of stopwords' language.
Returns:
List of stemmed tokens.
"""
# Creates the RSLP stemmer
stemmer = RSLPStemmer()
if remove_stopwords:
# Gathers the stopwords
stop_words = stopwords.words(language)
# Stems and removes the stopwords
stemmed_words = [stemmer.stem(word) for word in words if word.lower() not in stop_words]
else:
# Just stems the words
stemmed_words = [stemmer.stem(word) for word in words]
return stemmed_words
|
4dd721a691e832dc3b8160c678fe7e1c05b6a015
| 19,619 |
import os
import pickle
import tensorflow as tf
def load_model(model_dir, model_file=None):
"""Loads the model.
The model object is pickled in `model_dir` to make the model configuration
optional for future runs.
Args:
model_dir: The model directory.
model_file: An optional model configuration.
Returns:
A `opennmt.models.Model` object.
"""
serial_model_file = os.path.join(model_dir, "model_description.pkl")
if model_file:
if tf.train.latest_checkpoint(model_dir) is not None:
tf.logging.warn(
"You provided a model configuration but a checkpoint already exists. "
"The model configuration must define the same model as the one used for "
"the initial training. However, you can change non structural values like "
"dropout.")
model_config = load_model_module(model_file)
model = model_config.model()
with open(serial_model_file, "wb") as serial_model:
pickle.dump(model, serial_model)
elif not os.path.isfile(serial_model_file):
raise RuntimeError("A model configuration is required.")
else:
tf.logging.info("Loading serialized model description from %s", serial_model_file)
with open(serial_model_file, "rb") as serial_model:
model = pickle.load(serial_model)
return model
|
437fd8378113db11b9afa5133d57d6936ab722fa
| 19,620 |
def parse_healing_and_target(line):
"""Helper method that finds the amount of healing and who it was provided to"""
split_line = line.split()
target = ' '.join(split_line[3:split_line.index('for')])
target = target.replace('the ', '')
amount = int(split_line[split_line.index('for')+1])
return [amount, target]
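# Hypothetical log line, purely to illustrate the format this parser expects
# (three leading tokens, the target between them and 'for', then the amount):
# >>> parse_healing_and_target("10:42 Ayla heals the wounded knight for 250 points of healing.")
# [250, 'wounded knight']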
|
3f11c0807ab87d689e47a79fc7e12b32c00dbd95
| 19,621 |
from typing import Optional
from typing import Sequence
import tensorflow as tf
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = get_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
# Pad the image with at least 32px on the short edge and take a
# crop that maintains aspect ratio.
scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32),
tf.cast(image_width, tf.float32) / (image_size[1] + 32))
padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32)
padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32)
offset_height = ((image_height - padded_center_crop_height) + 1) // 2
offset_width = ((image_width - padded_center_crop_width) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_height, padded_center_crop_width]
image = crop(image_bytes, crop_window)
return image
|
7a7ad3eb36099d560da126011845426bdcd1f326
| 19,622 |
def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4,
inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True):
"""Incremental Network Quantization Affine Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (~nnabla.initializer.BaseInitializer): Initializer for the weight.
i_init (~nnabla.initializer.BaseInitializer): Initializer for the indicators (0 ... learnable, 1 ... fixed).
b_init (~nnabla.initializer.BaseInitializer): Initializer for the bias.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
fan_in = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if i_init is None:
fan_in = np.prod(inp.shape[base_axis:])
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, not fix_parameters)
i = get_parameter_or_create(
"I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, not fix_parameters)
return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed)
|
7654d925a13276002419c9d08342e4e7974bdc31
| 19,623 |
def is_greater_equal(min_value):
"""Check if the attribute value is greater than or equal to a minimum value.
This validator can handle both lists and single element attributes. If it
is a list, it checks if the element with the smallest value is greater than
or equal to the specified minimum value.
"""
def compare(self, attribute, value):
if type(value) is not list:
value = [value]
if np.min(value) < min_value:
_logger.error(
f"{attribute.name} cannot be smaller than {min_value}!",
_logger.ExceptionTypes.ValueError,
)
return compare
|
a95cc24afcae6d11689b872f2178f6b38b864ca7
| 19,624 |
from typing import List
from typing import Text
def save_lyrics(list_: List[Text], location: Text) -> None:
"""Writes 'list_' to 'location' as txt file. Returns None."""
with open(location, "w+") as f:
for element in list_:
f.write(element)
f.write("\n")
return None
|
1fff7cf838fdaea32f6875beab90b172a84f379c
| 19,625 |
def translate_provider_for_icon(sync_server, project, site):
"""
Get provider for 'site'
This is used for getting the icon; 'studio' should have a different icon
than local sites, even though the provider 'local_drive' is the same
"""
if site == sync_server.DEFAULT_SITE:
return sync_server.DEFAULT_SITE
return sync_server.get_provider_for_site(site=site)
|
58867a6dd44c83582d85fc1baf48121eff714232
| 19,626 |
def server_delete_ip(body=None): # noqa: E501
"""delete server IP
Sent by the server during shutdown. # noqa: E501
:param body: port of the iperf server. IP and time could be empty
:type body: dict | bytes
:rtype: List[InlineResponse200]
"""
if connexion.request.is_json:
body = ServerAddr.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
92b2f425ae7cca1e3e42c58382f3d21b2e96f016
| 19,627 |
import re
def extract_key_and_index(field):
"""Returns the key type, key name and if key is a compound list then returns the index pointed by the field
Arguments:
field: csv header field
"""
for key_type, value in KEY_TYPES.items():
regex = re.compile(value["regex"])
match = regex.match(field)
if match:
return tuple([key_type] + list(match.groups()))
return None
|
aba66922117cd14f2c670df839c3ca522856caa3
| 19,628 |
import torch
def as_mask(indexes, length):
"""
Convert indexes into a binary mask.
Parameters:
indexes (LongTensor): positive indexes
length (int): maximal possible value of indexes
"""
mask = torch.zeros(length, dtype=torch.bool, device=indexes.device)
mask[indexes] = 1
return mask
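# Quick illustrative usage:
# >>> as_mask(torch.tensor([0, 3]), 5)
# tensor([ True, False, False,  True, False])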
|
0235d66f9ee5bdc7447819122b285d29efd238c9
| 19,629 |
import numpy as np
from scipy import interpolate
def interpolate_array(x, y, smooth_rate=500):
"""
Interpolate (x, y) data onto a finer, evenly spaced grid using a PCHIP interpolator.
:param x: x-coordinates of the data points
:param y: y-values at the given x-coordinates
:param smooth_rate: number of points in the interpolated output (default 500)
:return: (new_x, new_y) arrays of interpolated data
"""
interp_obj = interpolate.PchipInterpolator(x, y)
new_x = np.linspace(x[0], x[-1], smooth_rate)
new_y = interp_obj(new_x)
return new_x, new_y
|
4c6f79c3071496e6314d772651d2e6cc6a449c74
| 19,630 |
from typing import Sequence
from typing import Counter
import copy
def calculate_resource_utilization_for_slaves(
slaves: Sequence[_SlaveT], tasks: Sequence[MesosTask]
) -> ResourceUtilizationDict:
""" Given a list of slaves and a list of tasks, calculate the total available
resource available in that list of slaves, and the resources consumed by tasks
running on those slaves.
:param slaves: a list of slaves to calculate resource usage for
:param tasks: the list of tasks running in the mesos cluster
:returns: a dict, containing keys for "free" and "total" resources. Each of these keys
is a ResourceInfo tuple, exposing a number for cpu, disk and mem.
"""
resource_total_dict: _Counter[str] = Counter()
for slave in slaves:
filtered_resources = filter_mesos_state_metrics(slave["resources"])
resource_total_dict.update(Counter(filtered_resources))
resource_free_dict = copy.deepcopy(resource_total_dict)
for task in tasks:
task_resources = task["resources"]
resource_free_dict.subtract(Counter(filter_mesos_state_metrics(task_resources)))
for slave in slaves:
filtered_resources = filter_mesos_state_metrics(
reserved_maintenence_resources(slave["reserved_resources"])
)
resource_free_dict.subtract(Counter(filtered_resources))
return {
"free": ResourceInfo(
cpus=resource_free_dict["cpus"],
disk=resource_free_dict["disk"],
mem=resource_free_dict["mem"],
gpus=resource_free_dict.get("gpus", 0),
),
"total": ResourceInfo(
cpus=resource_total_dict["cpus"],
disk=resource_total_dict["disk"],
mem=resource_total_dict["mem"],
gpus=resource_total_dict.get("gpus", 0),
),
"slave_count": len(slaves),
}
|
13b4856b3ef0bdf06410a58ecc0fbc29bfda4483
| 19,631 |
def check_pass(value):
"""
This test always passes (it is used for 'checking' things like the
workshop address, for which no sensible validation is feasible).
"""
return True
|
aa3a5f536b5bc729dc37b7f09c3b997c664b7481
| 19,632 |
def state_array_to_int(s):
"""translates a state s into an integer by interpreting the state as a
binary represenation"""
return int(state_array_to_string(s), 2)
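# Worked example, assuming state_array_to_string([1, 0, 1, 1]) yields "1011":
# >>> state_array_to_int([1, 0, 1, 1])
# 11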
|
b0b50dd879b74af27946cde49e1bf805c2d6e504
| 19,633 |
import asyncio
import traceback
def async_task(coro, loop=asyncio.get_event_loop(), error_cb=None):
"""
Wrapper to always print exceptions for asyncio tasks.
"""
future = asyncio.ensure_future(coro)
def exception_logging_done_cb(future):
try:
e = future.exception()
except asyncio.CancelledError:
return
if e is not None:
log.critical('Unhandled exception in async future: {}: {}\n{}',
type(e).__name__, e, ''.join(traceback.format_tb(e.__traceback__)))
if error_cb is not None:
error_cb()
loop.call_exception_handler({
'message': 'Unhandled exception in async future',
'future': future,
'exception': e,
})
future.add_done_callback(exception_logging_done_cb)
return future
|
5520aafebca17cbe32c79b69e41856f6076179f3
| 19,634 |
def is_valid_charts_yaml(content):
"""
Check if 'content' contains mandatory keys
:param content: parsed YAML file as list of dictionary of key values
:return: True if dict contains mandatory values, else False
"""
# Iterate on each list cell
for chart_details in content:
# If one of the keys is missing or, is None
if not all(chart_details.get(x) is not None
and x in chart_details
for x in ['chart_name', 'helm_repo_name', 'name_space', 'values_file', 'private_image']):
return False
# If one of the keys is not a string
if not all(type(chart_details.get(x)) is str
for x in ['chart_name', 'helm_repo_name', 'name_space', 'values_file']):
return False
# If one of the keys is not a boolean
if not all(type(chart_details.get(x)) is bool
for x in ['private_image']):
return False
if not all(type(chart_details.get(x)) is list
for x in ['extra_executes']):
return False
return True
|
cc68ba6bc9166f8d2f8c37da756accec667f471a
| 19,635 |
def get_trader_fcas_availability_agc_status_condition(params) -> bool:
"""Get FCAS availability AGC status condition. AGC must be enabled for regulation FCAS."""
# Check AGC status if presented with a regulating FCAS offer
if params['trade_type'] in ['L5RE', 'R5RE']:
# AGC is active='1', AGC is inactive='0'
return True if params['agc_status'] == '1' else False
# Return True if presented with a contingency FCAS offer (AGC doesn't need to be enabled)
else:
return True
|
fa73ae12a0934c76f12c223a05161280a6dc01f1
| 19,636 |
import importlib
def load_class(full_class_string):
"""
dynamically load a class from a string
"""
class_data = full_class_string.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
# Finally, we retrieve the Class
return getattr(module, class_str)
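# Illustrative usage with a standard-library class:
# >>> load_class("collections.OrderedDict")
# <class 'collections.OrderedDict'>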
|
95ab0a0b27d508b5bd41468cf6a4e3c008799fdf
| 19,637 |
def request_credentials_from_console():
"""
Requests the credentials interactively and returns them as a tuple (username, password)
"""
username = raw_input('Username: ')
password = raw_input('Password: ')
return username, password
|
43102d8528502f700ab96714831c94abb6a3b0f8
| 19,638 |
def prettify_url(url):
"""Return a URL without its schema
"""
if not url:
return url
split = url.split('//', 1)
if len(split) == 2:
schema, path = split
else:
path = url
return path.rstrip('/')
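# Illustrative behaviour:
# >>> prettify_url("https://example.com/docs/")
# 'example.com/docs'
# >>> prettify_url("example.com/docs")
# 'example.com/docs'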
|
0beed0522355e4ea8170cac22e61f92e0c21ccca
| 19,639 |
def CDLMORNINGDOJISTAR(data: xr.DataArray, penetration: float = 0.3) -> xr.DataArray:
"""
Morning Doji Star (Pattern Recognition)
Inputs:
data:['open', 'high', 'low', 'close']
Parameters:
penetration: 0.3
Outputs:
double series (values are -1, 0 or 1)
"""
return multiple_series_call(talib.CDLMORNINGDOJISTAR, data, ds.TIME, ds.FIELD, [f.OPEN, f.HIGH, f.LOW, f.CLOSE],
[penetration], result_divider=100)
|
be1f4954dda5109d067070fc97c340233085b043
| 19,640 |
def panerror_to_dict(obj):
"""Serializer function for POCS custom exceptions."""
name_match = error_pattern.search(str(obj.__class__))
if name_match:
exception_name = name_match.group(1)
else:
msg = f"Unexpected obj type: {obj}, {obj.__class__}"
raise ValueError(msg)
return {"__class__": "PanError",
"exception_name": exception_name,
"args": obj.args}
|
8841d2c4b6f3ba1deae5057a6b85b70830c412a1
| 19,641 |
from typing import Optional
def build_class_instance(module_path: str, init_params: Optional[dict] = None):
"""
Create an object instance from absolute module_path string.
Parameters
----------
module_path: str
Full module_path that is valid for your project or some external package.
init_params: optional dict
These parameters will be used as init parameters for the given type.
Returns
-------
Some object instance
"""
class_ = get_type_from_module_path(module_path=module_path)
result = class_(**(init_params or {}))
return result
|
d50ffdbb8bbeed36b572e6e555376febabecb745
| 19,642 |
def maintainers_mapper(maintainers, package):
"""
Update package maintainers and return package.
https://docs.npmjs.com/files/package.json#people-fields-author-contributors
npm also sets a top-level "maintainers" field with your npm user info.
"""
# note this is the same code as contributors_mappers... should be refactored
maintains = []
if isinstance(maintainers, list):
for contrib in maintainers:
name, email, url = parse_person(contrib)
maintains.append(models.Party(type=models.party_person, name=name, email=email, url=url))
else: # a string or dict
name, email, url = parse_person(maintainers)
maintains.append(models.Party(type=models.party_person, name=name, email=email, url=url))
package.maintainers = maintains
return package
|
57e76ec2cbc1727778bfe980c5cd6d82798a1800
| 19,643 |
def calc_original_pressure(pressure_ratio):
"""
calculates the original pressure value given the <code>AUDITORY_THRESHOLD</code>.
The results are only correct if the pressure ratio is build using the <code>AUDITORY_THRESHOLD</code>.
:param pressure_ratio: the pressure ration that shall be converted to the original value
:return: the pressure value the ratio is based on
"""
return pressure_ratio * (AUDITORY_THRESHOLD ** 2)
|
1a0680073cb739fef47952887e6dedf4487f2aa0
| 19,644 |
def normalize_field_names(fields):
"""
Map field names to a normalized form to check for collisions like 'coveredText' vs 'covered_text'
"""
return set(s.replace('_','').lower() for s in fields)
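# Illustrative collision check: both spellings normalise to the same key.
# >>> normalize_field_names(['coveredText', 'covered_text'])
# {'coveredtext'}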
|
55bdac50fd1fcf23cfec454408fbcbbae96e507e
| 19,645 |
import os
import itertools
def which(program): # type: (str) -> str
"""Like UNIX which, returns the first location of a program given using your system PATH
If full location to program is given, uses that first"""
dirname, progname = os.path.split(program) # type: str, str
syspath = tuple([dirname] + os.environ['PATH'].split(os.pathsep)) # type: Tuple[str]
syspath = tuple(filter(None, syspath)) # type: tuple[str]
progpath = map(os.path.join, syspath, itertools.repeat(progname, len(syspath))) # type: map[str]
try:
extensions = tuple([''] + os.environ.get('PATHEXT').split(os.pathsep)) # type: Tuple[str]
progpath = map(lambda t: ''.join(t), itertools.product(progpath, extensions)) # type: map[str]
except AttributeError:
pass
progpath = tuple(filter(lambda e: os.path.isfile(e) and os.access(e, os.X_OK), progpath)) # type: Tuple[str]
if not progpath:
raise ValueError("Cannot find program '%s' in your PATH" % program)
return progpath[0]
|
61748b6b9aef948f4d570791d432de0c69d815d9
| 19,646 |
def powspec_disc_n(n, fs, mu, s, kp, km, vr, vt, tr):
"""Return the n'th Lorentzian and its width"""
Td = ifana.LIF().Tdp(mu, s, vr, vt) + tr
Ppp = (kp*exp(-(kp+km)*tr)+km)/(kp+km)
kpbar = (kp*(Td-tr)-log(Ppp))/Td
return 1./Td * 2*kpbar/(kpbar**2 + (2*pi*(fs - n*1./Td))**2), kpbar
|
60335e496b734c32116424d857a3c59de7f5b567
| 19,647 |
def list_keys(request):
"""
Tags: keys
---
Lists all added keys.
READ permission required on key.
---
"""
auth_context = auth_context_from_request(request)
return filter_list_keys(auth_context)
|
564d03457265c0461e82b55abfb23cb4d45ad0ac
| 19,648 |
def rounder(money_dist: list, pot: int, to_coin: int = 2) -> list:
"""
Rounds the money distribution while preserving total sum
stolen from https://stackoverflow.com/a/44740221
"""
def custom_round(x):
""" Rounds a number to be divisible by to_coin specified """
return int(to_coin * round(x / to_coin))
rs = [custom_round(x) for x in money_dist]
k = pot - sum(rs)
assert k == custom_round(k)
fs = [x - custom_round(x) for x in money_dist]
indices = [
i
for order, (e, i) in enumerate(
reversed(sorted((e, i) for i, e in enumerate(fs)))
)
if order < k
]
return [r + 1 if i in indices else r for i, r in enumerate(rs)]
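# Worked example with to_coin=1 (whole units): naive rounding gives [3, 3, 3],
# one short of the pot, so the share with the largest remainder absorbs it:
# >>> rounder([3.3, 3.3, 3.4], pot=10, to_coin=1)
# [3, 3, 4]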
|
f315027def4646252aa7d4ee7c05ca3085625583
| 19,649 |
def find_wcscorr_row(wcstab, selections):
""" Return an array of indices from the table (NOT HDU) 'wcstab' that matches the
selections specified by the user.
The row selection criteria must be specified as a dictionary with
column name as key and value(s) representing the valid desired row values.
For example, {'wcs_id':'OPUS','extver':2}.
"""
mask = None
for i in selections:
bmask = (wcstab.field(i) == selections[i])
if mask is None:
mask = bmask.copy()
else:
mask = np.logical_and(mask,bmask)
del bmask
return mask
|
dcd2b4025ec3756319911e6626dd403a6efda1c4
| 19,650 |
def process_image(image_file):
""" 다섯 단계의 이미지 처리(Image precessing)를 힙니다.
현재 함수에서 순서를 변경하여 적용할 수 있습니다.
1) Gray-scale 적용
2) Morph Gradient 적용
3) Threshold 적용
4) Long Line Removal 적용
5) Close 적용
6) Contour 추출
:param image_file: 이미지 처리(Image precessing)를 적용할 이미지 파일
:return: 이미지 처리 후 글자로 추정되는 부분을 잘라낸 이미지 리스트
"""
image_origin = open_original(image_file)
# todo input 사이즈가 일정 수준 이하일 경우 cv2.pyrUp() 으로 사이즈를 확장할 수 있도록 자동화하기
# todo 아니면 설정파일에서 사이즈업 할지말지를 선택할 수 있도록 하기 (configs.yml)
# image_origin = cv2.pyrUp(image_origin) # size up ( x4 ) 이미지 크기가 작을 경우 이미지 사이즈업 해야합니다.
# Grey-Scale
image_gray = get_gray(image_origin)
# Morph Gradient
image_gradient = get_gradient(image_gray)
# Threshold
image_threshold = get_threshold(image_gradient)
# Long line remove
image_line_removed = remove_long_line(image_threshold)
# Morph Close
image_close = get_closing(image_line_removed)
contours = get_contours(image_close)
return get_cropped_images(image_origin, contours)
|
3185487505cd26db64fb294cef5bb1a26c7b5482
| 19,651 |
import urllib
import pandas as pd
def _gftRead(url, step):
"""
Reads in a gtf file from a specific db given the url.
Some gtf files have a certain number of header lines that
are skipped, however.
Input: url where gtf is fetched from
Input: number of lines to skip while reading in the frame
Output: gtf in a pandas frame
"""
urllib.request.urlretrieve(url, "/tmp/conversion.gtf.gz")
gtf = pd.read_csv("/tmp/conversion.gtf.gz",
compression = "gzip",
engine = "python",
delimiter = '\t',
skiprows = step,
header = None)
return gtf
|
ef05d2747188def526612bdd27931e0420e275dd
| 19,652 |
def add() -> jsonify:
"""
Adds a new item in the server and returns the updated list to the front-end
"""
# Passed Items from Front-End
name = request.form['name']
priority = request.form['priority']
price = request.form['price'].replace(",", "")  # strip thousands separators to avoid string-to-float conversion errors
money = request.form['money']
# Adds item to the server and check the status of the addition
is_right = mysqlcommands.add_item(name, priority, price, money)
# Pass the status of the addition to this variable
message = constants.ADD_ITEM_SUCCESS_MESSAGE if is_right else constants.ADD_ITEM_FAILURE_MESSAGE
# Get the content from the JSON file
my_obj = jsoncommands.get_json(constants.JSON_FILE_PATH)
# Re-allocate the budget with the new added item
utilities.money_allocation(mysqlcommands.get_all_items(), my_obj['Total'] * my_obj['Percentage'],
mysqlcommands.priority_count())
return jsonify({
"color": is_right,
"message": message,
"allItems": mysqlcommands.get_all_items()
})
|
e8a32aa47ee057c6f9653554955cddd0b003ef1a
| 19,653 |
import copy
import os
def _get_base(**kwargs):
"""
If the needed base does not exist, then create it, if it does exist
create nothing and return the name of the base lxc container so
it can be cloned.
"""
profile = get_container_profile(copy.deepcopy(kwargs.get("profile")))
kw_overrides = copy.deepcopy(kwargs)
def select(key, default=None):
kw_overrides_match = kw_overrides.pop(key, _marker)
profile_match = profile.pop(key, default)
# let kwarg overrides be the preferred choice
if kw_overrides_match is _marker:
return profile_match
return kw_overrides_match
template = select("template")
image = select("image")
vgname = select("vgname")
path = kwargs.get("path", None)
# remove the above three variables from kwargs, if they exist, to avoid
# duplicates if create() is invoked below.
for param in ("path", "image", "vgname", "template"):
kwargs.pop(param, None)
if image:
proto = _urlparse(image).scheme
img_tar = __salt__["cp.cache_file"](image)
img_name = os.path.basename(img_tar)
hash_ = salt.utils.hashutils.get_hash(
img_tar, __salt__["config.get"]("hash_type")
)
name = "__base_{0}_{1}_{2}".format(proto, img_name, hash_)
if not exists(name, path=path):
create(
name, template=template, image=image, path=path, vgname=vgname, **kwargs
)
if vgname:
rootfs = os.path.join("/dev", vgname, name)
edit_conf(
info(name, path=path)["config"],
out_format="commented",
**{"lxc.rootfs": rootfs}
)
return name
elif template:
name = "__base_{0}".format(template)
if not exists(name, path=path):
create(
name, template=template, image=image, path=path, vgname=vgname, **kwargs
)
if vgname:
rootfs = os.path.join("/dev", vgname, name)
edit_conf(
info(name, path=path)["config"],
out_format="commented",
**{"lxc.rootfs": rootfs}
)
return name
return ""
|
0b1c966669fe6027f89b62461ee5fd3b43398e53
| 19,654 |
def calculate(formula, **params):
""" Calculate formula and return a dictionary of coin and amounts """
formula = Formula.get(formula)
if formula is None:
raise InvalidFormula(formula)
if not formula.expression:
return {}
return calculate_expression(formula.expression, formula, **params)
|
e8a237d2677581296bb1491badecf83264c0d44a
| 19,655 |
import torch
def pack_batch_tensor(inputs):
"""default pad_ids = 0
"""
input_max_length = max([d.size(0) for d in inputs])
# prepare batch tensor
input_ids = torch.LongTensor(len(inputs), input_max_length).zero_()
input_mask = torch.LongTensor(len(inputs), input_max_length).zero_()
for i, d in enumerate(inputs):
input_ids[i, :d.size(0)].copy_(d)
input_mask[i, :d.size(0)].fill_(1)
return {
"input_ids":input_ids,
"input_mask":input_mask,
}
|
33e59acbc8facaf41064e2dd7031bdd314211878
| 19,656 |
def build_network(network_class=None,
dataset_dirs_args=None,
dataset_dirs_class=None,
dataset_dirs=None,
dataset_spec_args=None,
dataset_spec_class=None,
dataset_spec=None,
network_spec_args=None,
network_spec_class=None,
network_spec=None,
json_spec_path=None,
spec_cont=None,
class_priority=False):
"""
build network
"""
# build network specification
network_spec = ivy.default(
network_spec,
build_network_specification(
dataset_dirs_args=dataset_dirs_args,
dataset_dirs_class=dataset_dirs_class,
dataset_dirs=dataset_dirs,
dataset_spec_args=dataset_spec_args,
dataset_spec_class=dataset_spec_class,
dataset_spec=dataset_spec,
network_spec_args=network_spec_args,
network_spec_class=network_spec_class,
json_spec_path=json_spec_path,
spec_cont=spec_cont))
# override network_class if specified in network_spec
network_class = ivy.default(ivy.default(
_import_arg_specified_class_if_present(network_spec, 'network_class'),
network_class, rev=class_priority),
None)
# verify network_class exists
if not ivy.exists(network_class):
raise Exception('network_class must either be specified in this build_network() method,'
'or network_class attribute must be specified in the network_spec instance')
# network
return network_class(network_spec)
|
4fae693408b385629c6ae645c69778276c16b915
| 19,657 |
import os
import nipype
import nipype.pipeline as pe
import nipype.interfaces.utility as utility
import PUMI.func_preproc.info.info_get as info_get
import PUMI.utils.utils_convert as utils_convert
import nipype.interfaces.afni as afni
import PUMI.utils.globals as globals
import nipype.interfaces.io as io  # DataSink lives in nipype.interfaces.io, not the stdlib io module
def slt_workflow(slicetiming_txt="alt+z",SinkTag="func_preproc",wf_name="slicetiming_correction"):
"""
Modified version of porcupine generated slicetiming code:
`source: -`
Creates a slice time corrected functional image.
Workflow inputs:
:param func: The reoriented functional file.
:param SinkDir:
:param SinkTag: The output directory in which the returned images (see workflow outputs) could be found in a subdirectory directory specific for this workflow.
Workflow outputs:
:return: slt_workflow - workflow
Balint Kincses
[email protected]
2018
"""
# This is a Nipype generator. Warning, here be dragons.
# !/usr/bin/env python
SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
if not os.path.exists(SinkDir):
os.makedirs(SinkDir)
# Basic interface class generates identity mappings
inputspec = pe.Node(utility.IdentityInterface(fields=['func',
'slicetiming_txt']),
name='inputspec')
inputspec.inputs.func = func
inputspec.inputs.slicetiming_txt = slicetiming_txt
# Custom interface wrapping function TR
#NodeHash_6000004b9860 = pe.MapNode(interface=info_get.TR, name='NodeName_6000004b9860', iterfield=['in_file'])
TRvalue = pe.Node(interface=info_get.TR,
name='TRvalue')
# Custom interface wrapping function Str2Float
func_str2float = pe.Node(interface=utils_convert.Str2Float,
name='func_str2float')
# Custom interface wrapping function Float2Str
func_str2float_2 = pe.Node(interface=utils_convert.Float2Str,
name='func_str2float_2')
# Wraps command **3dTshift**
sltcor = pe.Node(interface=afni.TShift(),
name='sltcor')
sltcor.inputs.rltplus = True
sltcor.inputs.outputtype = "NIFTI_GZ"
#sltcor.inputs.terminal_output = 'allatonce'
# Basic interface class generates identity mappings
outputspec = pe.Node(utility.IdentityInterface(fields=['slicetimed', 'TR']),
name='outputspec')
#todo: qc timeseries
# Custom interface wrapping function JoinVal2Dict
#func_joinval2dict = pe.Node(interface=utils_convert.JoinVal2Dict,
# name='func_joinval2dict')
# Generic datasink module to store structured outputs
ds = pe.Node(interface=io.DataSink(),
name='ds')
ds.inputs.base_directory = SinkDir
#ds.inputs.regexp_substitutions = [("func_slicetimed/_NodeName_.{13}", "")]
# Create a workflow to connect all those nodes
analysisflow = nipype.Workflow(wf_name)
analysisflow.connect(inputspec, 'slicetiming_txt', sltcor, 'tpattern')
analysisflow.connect(func_str2float, 'float', outputspec, 'TR')
analysisflow.connect(inputspec, 'func', sltcor, 'in_file')
analysisflow.connect(inputspec, 'func', TRvalue, 'in_file')
analysisflow.connect(func_str2float_2, 'str', sltcor, 'tr')
analysisflow.connect(TRvalue, 'TR', func_str2float_2, 'float')
#analysisflow.connect(ds, 'out_file', func_joinval2dict, 'keys')
#analysisflow.connect(func_str2float, 'float', func_joinval2dict, 'vals')
analysisflow.connect(TRvalue, 'TR', func_str2float, 'str')
analysisflow.connect(sltcor, 'out_file', ds, 'slicetimed')
analysisflow.connect(sltcor, 'out_file', outputspec, 'slicetimed')
return analysisflow
|
9b813b8e32c17dae58e5dc552404c0cf1ee5a8a6
| 19,658 |
def lookup_service_root(service_root):
"""Dereference an alias to a service root.
A recognized server alias such as "staging" gets turned into the
appropriate URI. A URI gets returned as is. Any other string raises a
ValueError.
"""
if service_root == EDGE_SERVICE_ROOT:
# This will trigger a deprecation warning and use production instead.
service_root = 'edge'
return _dereference_alias(service_root, service_roots)
|
8cc5384ba26639438e4c7e18264fb39ee2445fcf
| 19,659 |
import warnings
def get_initial_configuration():
"""
Return (pos, type)
pos: (1, 1) - (9, 9)
type will be 2-letter strings like CSA format.
(e.g. "FU", "HI", etc.)
"""
warnings.warn(
"""get_initial_configuration() returns ambiguous cell state.
Use get_initial_configuration_with_dir() instead.""",
DeprecationWarning)
initial_state_top = {
(1, 1): "KY",
(2, 1): "KE",
(3, 1): "GI",
(4, 1): "KI",
(5, 1): "OU",
(6, 1): "KI",
(7, 1): "GI",
(8, 1): "KE",
(9, 1): "KY",
(2, 2): "KA",
(8, 2): "HI",
(1, 3): "FU",
(2, 3): "FU",
(3, 3): "FU",
(4, 3): "FU",
(5, 3): "FU",
(6, 3): "FU",
(7, 3): "FU",
(8, 3): "FU",
(9, 3): "FU",
}
initial_state = {}
for (pos, ty) in initial_state_top.items():
x, y = pos
initial_state[pos] = ty
initial_state[(10 - x, 10 - y)] = ty
return initial_state
|
c96f7f70e258d09090abeffc9815082265245cf2
| 19,660 |
def request_pull_to_diff_or_patch(
repo, requestid, username=None, namespace=None, diff=False
):
"""Returns the commits from the specified pull-request as patches.
:arg repo: the `pagure.lib.model.Project` object of the current pagure
project browsed
:type repo: `pagure.lib.model.Project`
:arg requestid: the identifier of the pull-request to convert to patch
or diff
:type requestid: int
:kwarg username: the username of the user who forked then project when
the project viewed is a fork
:type username: str or None
:kwarg namespace: the namespace of the project if it has one
:type namespace: str or None
:kwarg diff: a boolean whether the data returned is a patch or a diff
:type diff: boolean
:return: the patch or diff representation of the specified pull-request
:rtype: str
"""
repo = flask.g.repo
if not repo.settings.get("pull_requests", True):
flask.abort(404, description="No pull-requests found for this project")
request = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo.id, requestid=requestid
)
if not request:
flask.abort(404, description="Pull-request not found")
if request.remote:
repopath = pagure.utils.get_remote_repo_path(
request.remote_git, request.branch_from
)
parentpath = pagure.utils.get_repo_path(request.project)
else:
repo_from = request.project_from
parentpath = pagure.utils.get_repo_path(request.project)
repopath = parentpath
if repo_from:
repopath = pagure.utils.get_repo_path(repo_from)
repo_obj = pygit2.Repository(repopath)
orig_repo = pygit2.Repository(parentpath)
branch = repo_obj.lookup_branch(request.branch_from)
commitid = None
if branch:
commitid = branch.peel().hex
diff_commits = []
if request.status != "Open":
commitid = request.commit_stop
try:
for commit in repo_obj.walk(commitid, pygit2.GIT_SORT_NONE):
diff_commits.append(commit)
if commit.oid.hex == request.commit_start:
break
except KeyError:
# This happens when repo.walk() cannot find commitid
pass
else:
try:
diff_commits = pagure.lib.git.diff_pull_request(
flask.g.session, request, repo_obj, orig_repo, with_diff=False
)
except pagure.exceptions.PagureException as err:
flask.flash("%s" % err, "error")
return flask.redirect(
flask.url_for(
"ui_ns.view_repo",
username=username,
repo=repo.name,
namespace=namespace,
)
)
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
flask.flash(
"Could not update this pull-request in the database", "error"
)
diff_commits.reverse()
patch = pagure.lib.git.commit_to_patch(
repo_obj, diff_commits, diff_view=diff
)
return flask.Response(patch, content_type="text/plain;charset=UTF-8")
|
a7b543294cb66561700b7db7800277d74ed1267c
| 19,661 |
def GL(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100):
""" Computes the GL fractional derivative of a function for an entire array
of function values.
Parameters
==========
alpha : float
The order of the differintegral to be computed.
f_name : function handle, lambda function, list, or 1d-array of
function values
This is the function that is to be differintegrated.
domain_start : float
The left-endpoint of the function domain. Default value is 0.
domain_end : float
The right-endpoint of the function domain; the point at which the
differintegral is being evaluated. Default value is 1.
num_points : integer
The number of points in the domain. Default value is 100.
Examples:
>>> DF_poly = GL(-0.5, lambda x: x**2 - 1)
>>> DF_sqrt = GL(0.5, lambda x: np.sqrt(x), 0., 1., 100)
"""
# Flip the domain limits if they are in the wrong order.
if domain_start > domain_end:
domain_start, domain_end = domain_end, domain_start
# Check inputs.
checkValues(alpha, domain_start, domain_end, num_points)
f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points)
# Get the convolution filter.
b_coeffs = GLcoeffs(alpha, num_points-1)
# Real Fourier transforms for convolution filter and array of function values.
B = np.fft.rfft(b_coeffs)
F = np.fft.rfft(f_values)
result = np.fft.irfft(F*B)*step_size**-alpha
return result
|
226d5d8b49be9a243ac4508a7691c8997503cb4d
| 19,662 |
def check_lint(root_dir, ignore, verbose, dry_run, files_at_a_time,
max_line_len, continue_on_error):
"""Check for lint.
Unless `continue_on_error` is selected, returns `False` on the first
iteration where lint is found, or where the lint checker otherwise
returned failure.
:return: Whether the check found everything OK.
"""
success = True
# Suffixes for types of file that pocketlint can check for us.
pocketlint_suffixes = C_LIKE_SUFFIXES + PERL_SUFFIXES + [
'.ini',
# Don't check for now. Styles differ too much.
# '.css',
'.js',
'.md',
'.cgi',
'.php',
'.py',
'.sh',
]
lintable_files = find_files(
root_dir, ignore=ignore, suffixes=pocketlint_suffixes)
command_line = ['pocketlint', '-m', '%d' % max_line_len, '--']
for chunk in chunk_file_list(lintable_files, files_at_a_time):
try:
run_command(
command_line + chunk, verbose=verbose, dry_run=dry_run)
except CalledProcessError:
success = False
if not success and not continue_on_error:
return False
return success
|
c43b7a05bf47b9108281bb7c0cef4ff1d6e107d3
| 19,663 |
def remove_comments(s):
"""
Examples
--------
>>> code = '''
... # comment 1
... # comment 2
... echo foo
... '''
>>> remove_comments(code)
'echo foo'
"""
return "\n".join(l for l in s.strip().split("\n") if not l.strip().startswith("#"))
|
1d3e1468c06263d01dd204c5ac89235a17f50972
| 19,664 |
def generate_wsl(ws):
"""
Generates watershed line that correspond to areas of
touching objects.
"""
se = square(3)
ero = ws.copy()
ero[ero == 0] = ero.max() + 1
ero = erosion(ero, se)
ero[ws == 0] = 0
grad = dilation(ws, se) - ero
grad[ws == 0] = 0
grad[grad > 0] = 255
grad = grad.astype(np.uint8)
return grad
|
6d61b2b366ca6a4b94f8a6513f1a0a5fb1bfd8c9
| 19,665 |
def train_lstm_model(x, y,
epochs=200,
patience=10,
lstm_dim=48,
batch_size=128,
lr=1e-3):
"""
Train an LSTM to predict purchase (1) or abandon (0)
:param x: session sequences
:param y: target labels
:param epochs: num training epochs
:param patience: early stopping patience
:param lstm_dim: lstm units
:param batch_size: batch size
:param lr: learning rate
:return:
"""
# Verify whether a GPU or CPU is being used
print("Print out system device...")
print(device_lib.list_local_devices())
print("Starting training now...")
X_train, X_test, y_train, y_test = train_test_split(x,y)
# pad sequences for training in batches
max_len = max(len(_) for _ in x)
X_train = pad_sequences(X_train, padding="post",value=7, maxlen=max_len)
X_test = pad_sequences(X_test, padding="post", value=7, maxlen=max_len)
# convert to one-hot
X_train = tf.one_hot(X_train, depth=7)
X_test = tf.one_hot(X_test, depth=7)
y_train = np.array(y_train)
y_test = np.array(y_test)
# Define Model
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(None,7)))
# Masking layer ignores padded time-steps
model.add(keras.layers.Masking())
model.add(keras.layers.LSTM(lstm_dim))
model.add(keras.layers.Dense(1,activation='sigmoid'))
model.summary()
# Some Hyper Params
opt = keras.optimizers.Adam(learning_rate=lr)
loss = keras.losses.BinaryCrossentropy()
es = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=patience,
verbose=1,
restore_best_weights=True)
# Include wandb callback for tracking
callbacks = [es, WandbCallback()]
model.compile(optimizer=opt,
loss=loss,
metrics=['accuracy'])
# Train Model
model.fit(X_train, y_train,
validation_data=(X_test,y_test),
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks)
# return trained model
# NB: to store model as Metaflow Artifact it needs to be pickle-able!
return model.to_json(), model.get_weights(), model
|
159ad99e419d659e655bcdd9556a6ae3202071ae
| 19,666 |
def nash_sutcliffe_efficiency(predicted, observed):
""" implements Nash-Sutcliffe Model Efficiencobserved Coefficient where predicted is modeled and observed is observed"""
if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
return np.asarray([np.nan])
return 1 - np.sum((predicted - observed)**2) / np.sum((observed - observed.mean())**2)
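# A minimal worked example (assuming numpy is imported as np, as the function
# above already requires): an NSE of 1.0 is a perfect fit, 0.0 is no better
# than predicting the observed mean, and negative values are worse than that.
observed = np.array([1.0, 2.0, 3.0, 4.0])
predicted = np.array([1.1, 1.9, 3.2, 3.8])
print(nash_sutcliffe_efficiency(predicted, observed))  # 0.98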
|
b9820cf95472499d6e6c24e47ffb4bbd5574e439
| 19,667 |
def tf_Affine_transformer(points, theta):
"""
Arguments:
        points: `Matrix` [3, np] of grid points to transform, in homogeneous coordinates (x_t, y_t, 1)
theta: `Matrix` [bs, 2, 3] with a batch of transformations
"""
with tf.name_scope('Affine_transformer'):
num_batch = tf.shape(theta)[0]
grid = tf.tile(tf.expand_dims(points, 0), [num_batch, 1, 1])
# Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
T_g = tf.matmul(theta, grid)
return T_g
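# A minimal usage sketch (an assumption, not from the original source): with an
# identity affine matrix the transformed points equal the grid's (x, y) rows.
import tensorflow as tf
points = tf.constant([[0.0, 1.0, 0.0, 1.0],   # x_t
                      [0.0, 0.0, 1.0, 1.0],   # y_t
                      [1.0, 1.0, 1.0, 1.0]])  # homogeneous ones -> shape [3, np]
theta = tf.constant([[[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]]])      # identity transform, shape [1, 2, 3]
T_g = tf_Affine_transformer(points, theta)    # shape [1, 2, 4]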
|
a024609ac386b7d12173fb3965233ae4c21233d2
| 19,668 |
def read_COCO_gt(filename, n_imgs=None, ret_img_sizes=False, ret_classes=False, bbox_gt=False):
"""
Function for reading COCO ground-truth files and converting them to GroundTruthInstances format.
:param filename: filename of the annotation.json file with all COCO ground-truth annotations
:param n_imgs: number of images ground-truth is being extracted from. If None extract all (default None)
:param ret_img_sizes: Boolean flag dictating if the image sizes should be returned
:param ret_classes: Boolean flag dictating if the class mapping dictionary should be returned
:param bbox_gt: Boolean flag dictating if the GroundTruthInstance should ignore the segmentation mask and only use
bounding box information.
:return: ground-truth instances as GTLoader and optionally image sizes or class mapping dictionary if requested
"""
# read the json file
coco_obj = COCO(filename)
gt_instances = GTLoader(coco_obj, n_imgs, bbox_gt=bbox_gt)
# Return image sizes if requested
if ret_img_sizes:
return gt_instances, [
[coco_obj.imgs[img_id]['height'], coco_obj.imgs[img_id]['width']]
for img_id in sorted(coco_obj.imgs.keys())
]
# Return class mapping dictionary if requested
if ret_classes:
return gt_instances, {
coco_obj.cats[cat_id]['name']: idx
for idx, cat_id in enumerate(sorted(coco_obj.cats.keys()))
}
return gt_instances
|
dfcfa69ee620ac3546b1f646c9c23f126a9822c3
| 19,669 |
def get_metrics_from_file(metric_file):
"""Gets all metric functions within a file
:param str metric_file: The name of the file to look in
:return: Tuples containing (function name, function object)
:rtype: list
"""
try:
metrics = import_module(metric_file)
metrics = get_sorted_metric_function_tuples(metrics)
except ImportError:
raise NoMetricFileFound
if not metrics:
raise NoMetricFunctionsFound
return metrics
|
12e59feb03b6d8571fb7a5ef8f02e81e53605b0b
| 19,670 |
def mnist_model(inputs, mode):
"""Takes the MNIST inputs and mode and outputs a tensor of logits."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
inputs = tf.reshape(inputs, [-1, 28, 28, 1])
data_format = 'channels_last'
if tf.test.is_built_with_cuda():
# When running on GPU, transpose the data from channels_last (NHWC) to
# channels_first (NCHW) to improve performance.
# See https://www.tensorflow.org/performance/performance_guide#data_formats
data_format = 'channels_first'
inputs = tf.transpose(inputs, [0, 3, 1, 2])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=inputs,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu,
data_format=data_format)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2,
data_format=data_format)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu,
data_format=data_format)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2,
data_format=data_format)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024,
activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
return logits
|
7807adce4030d070c79eea5f3a1991ff4c4e1cd6
| 19,671 |
import base64
import io
from IPython.display import HTML
def show_local_mp4_video(file_name, width=640, height=480):
"""Renders a mp4 video on a Jupyter notebook
Args:
file_name (str): Path to file.
width (int): Video width.
height (int): Video height.
Returns:
obj: Video render as HTML object.
"""
video_encoded = base64.b64encode(io.open(file_name, 'rb').read())
return HTML(data='''<video width="{0}" height="{1}" alt="test" controls>
<source src="data:video/mp4;base64,{2}" type="video/mp4" />
</video>'''.format(width, height, video_encoded.decode('ascii')))
|
4f2b4660b005edcf865eca1c6632ffa6c0899fe8
| 19,672 |
import datetime
def change_status(sid, rev, status, **kwargs):
"""
[INCOMPLETE]
    - DISABLE OTHER REVISIONS OF THE SAME SIGNATURE WHEN DEPLOYING ONE
Change the status of a signature
Variables:
sid => ID of the signature
rev => Revision number of the signature
status => New state
Arguments:
None
Data Block:
None
Result example:
{ "success" : true } #If saving the rule was a success or not
"""
DEPLOYED_STATUSES = ['DEPLOYED', 'NOISY', 'DISABLED']
DRAFT_STATUSES = ['STAGING', 'TESTING']
STALE_STATUSES = ['INVALID']
user = kwargs['user']
if status == 'INVALID':
return make_api_response("",
"INVALID signature status is reserved for service use only.",
403)
if not user['is_admin'] and status in DEPLOYED_STATUSES:
return make_api_response("",
"Only admins are allowed to change the signature status to a deployed status.",
403)
key = "%sr.%s" % (sid, rev)
data = STORAGE.get_signature(key)
if data:
if not Classification.is_accessible(user['classification'], data['meta'].get('classification',
Classification.UNRESTRICTED)):
return make_api_response("", "You are not allowed change status on this signature", 403)
if data['meta']['al_status'] in STALE_STATUSES and status not in DRAFT_STATUSES:
return make_api_response("",
"Only action available while signature in {} status is to change "
"signature to a DRAFT status"
.format(data['meta']['al_status']),
403)
if data['meta']['al_status'] in DEPLOYED_STATUSES and status in DRAFT_STATUSES:
return make_api_response("", "You cannot change the status of signature %s r.%s from %s to %s." %
(sid, rev, data['meta']['al_status'], status), 403)
query = "meta.al_status:{status} AND _yz_rk:{sid}* AND NOT _yz_rk:{key}"
today = datetime.date.today().isoformat()
uname = user['uname']
if status not in ['DISABLED', 'INVALID', 'TESTING']:
for other in STORAGE.get_signatures(
STORAGE.list_filtered_signature_keys(
query.format(key=key, sid=sid, status=status)
)
):
other['meta']['al_state_change_date'] = today
other['meta']['al_state_change_user'] = uname
other['meta']['al_status'] = 'DISABLED'
other_sid = other['meta']['id']
other_rev = other['meta']['rule_version']
other_key = "%sr.%s" % (other_sid, other_rev)
STORAGE.save_signature(other_key, other)
data['meta']['al_state_change_date'] = today
data['meta']['al_state_change_user'] = uname
data['meta']['al_status'] = status
STORAGE.save_signature(key, data)
return make_api_response({"success": True})
else:
return make_api_response("", "Signature not found. (%s r.%s)" % (sid, rev), 404)
|
dce934db6c7fe34e184ff98a63f2bc5e32efaffe
| 19,673 |
import numpy as np
# Axis index constants for arrays ordered as (Z, Y, X).
Z, Y, X = 0, 1, 2
def trilinear_interpolation(a: np.ndarray, factor: float) -> np.ndarray:
"""Resize an three dimensional array using trilinear
interpolation.
:param a: The array to resize. The array is expected to have at
least three dimensions.
:param factor: The amount to resize the array. Given how the
interpolation works, you probably don't get great results
with factor less than or equal to .5. Consider multiple
passes of interpolation with larger factors in those cases.
:return: A :class:ndarray object.
:rtype: numpy.ndarray
Usage::
>>> import numpy as np
>>>
>>> a = np.array([
... [
... [0, 1],
... [1, 0],
... ],
... [
... [1, 0],
... [0, 1],
... ],
... ])
>>> trilinear_interpolation(a, 2)
array([[[0. , 0.5, 1. , 1. ],
[0.5, 0.5, 0.5, 0.5],
[1. , 0.5, 0. , 0. ],
[1. , 0.5, 0. , 0. ]],
<BLANKLINE>
[[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
<BLANKLINE>
[[1. , 0.5, 0. , 0. ],
[0.5, 0.5, 0.5, 0.5],
[0. , 0.5, 1. , 1. ],
[0. , 0.5, 1. , 1. ]],
<BLANKLINE>
[[1. , 0.5, 0. , 0. ],
[0.5, 0.5, 0.5, 0.5],
[0. , 0.5, 1. , 1. ],
[0. , 0.5, 1. , 1. ]]])
"""
# Return the array unchanged if the array won't be magnified.
if factor == 1:
return a
# Perform a defensive copy of the original array to avoid
# unexpected side effects.
a = a.copy()
# Since we are magnifying the given array, the new array's shape
# will increase by the magnification factor.
mag_size = tuple(int(s * factor) for s in a.shape)
# Map out the relationship between the old space and the
# new space.
indices = np.indices(mag_size)
if factor > 1:
whole = (indices // factor).astype(int)
parts = (indices / factor - whole).astype(float)
else:
new_ends = [s - 1 for s in mag_size]
old_ends = [s - 1 for s in a.shape]
true_factors = [n / o for n, o in zip(new_ends, old_ends)]
for i in range(len(true_factors)):
if true_factors[i] == 0:
true_factors[i] = .5
whole = indices.copy()
parts = indices.copy()
for i in Z, Y, X:
whole[i] = (indices[i] // true_factors[i]).astype(int)
parts[i] = (indices[i] / true_factors[i] - whole[i]).astype(float)
del indices
# Trilinear interpolation determines the value of a new pixel by
# comparing the values of the eight old pixels that surround it.
# The hashes are the keys to the dictionary that contains those
# old pixel values. The key indicates the position of the pixel
# on each axis, with one meaning the position is ahead of the
# new pixel, and zero meaning the position is behind it.
hashes = [f'{n:>03b}'[::-1] for n in range(2 ** 3)]
hash_table = {}
# The original array needs to be made one dimensional for the
# numpy.take operation that will occur as we build the tables.
raveled = np.ravel(a)
# Build the table that contains the old pixel values to
# interpolate.
for hash in hashes:
hash_whole = whole.copy()
# Use the hash key to adjust the which old pixel we are
# looking at.
for axis in Z, Y, X:
if hash[axis] == '1':
hash_whole[axis] += 1
# Handle the pixels that were pushed off the far
# edge of the original array by giving them the
# value of the last pixel along that axis in the
# original array.
m = np.zeros(hash_whole[axis].shape, dtype=bool)
m[hash_whole[axis] >= a.shape[axis]] = True
hash_whole[axis][m] = a.shape[axis] - 1
# Since numpy.take() only works in one dimension, we need to
# map the three dimensional indices of the original array to
# the one dimensional indices used by the raveled version of
# that array.
raveled_indices = hash_whole[Z] * a.shape[Y] * a.shape[X]
raveled_indices += hash_whole[Y] * a.shape[X]
raveled_indices += hash_whole[X]
# Get the value of the pixel in the original array.
hash_table[hash] = np.take(raveled, raveled_indices.astype(int))
# Once the hash table has been built, clean up the working arrays
# in case we are running short on memory.
else:
del hash_whole, raveled_indices, whole
# Everything before this was to set up the interpolation. Now that
# it's set up, we perform the interpolation. Since we are doing
# this across three dimensions, it's a three stage process. Stage
# one is along the X axis.
x1 = lerp(hash_table['000'], hash_table['001'], parts[X])
x2 = lerp(hash_table['010'], hash_table['011'], parts[X])
x3 = lerp(hash_table['100'], hash_table['101'], parts[X])
x4 = lerp(hash_table['110'], hash_table['111'], parts[X])
# Stage two is along the Y axis.
y1 = lerp(x1, x2, parts[Y])
y2 = lerp(x3, x4, parts[Y])
del x1, x2, x3, x4
# And stage three is along the Z axis. Since this is the last step
# we can just return the result.
return lerp(y1, y2, parts[Z])
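# The lerp() helper used above is not shown in this snippet; a minimal
# element-wise linear interpolation consistent with how it is called would be:
def lerp(a: np.ndarray, b: np.ndarray, t: np.ndarray) -> np.ndarray:
    """Linearly interpolate between a and b by the fractional distance t."""
    return a * (1 - t) + b * t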
|
a3ed2c13f13bdc37cbe47cd7ed6862a67cdebd66
| 19,674 |
def load_data(path):
"""将材料的label与text进行分离,得到两个list"""
label_list = []
text_list = []
with open(path, 'r') as f:
for line in f.readlines():
data = line.strip().split('\t')
data[1] = data[1].strip().split()
label = [0 for i in range(8)]
total = 0
for i in range(0, 8):
label[i] = float(data[1][1 + i].split(':')[1])
total += label[i]
for i in range(len(label)):
label[i] /= total
label_list.append(label)
text_list.append(data[2].strip().split())
return label_list, text_list
|
2274e0e327844c4dedae229b3c03a344653f7342
| 19,675 |
def invoke(request):
"""Where the magic happens..."""
with monitor(labels=_labels, name="transform_request"):
transformed_request = _transform_request(request)
with monitor(labels=_labels, name="invoke"):
response = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"):
transformed_response = _transform_response(response)
return transformed_response
|
4547de901cfd2cc153ea67632c2a002a17a15d8b
| 19,676 |
import imp
def serializers(): #FIXME: could be much smarter
"""return a tuple of string names of serializers"""
try:
imp.find_module('cPickle')
serializers = (None, 'pickle', 'json', 'cPickle', 'dill')
except ImportError:
serializers = (None, 'pickle', 'json', 'dill')
try:
imp.find_module('cloudpickle')
serializers += ('cloudpickle',)
except ImportError:
pass
try:
imp.find_module('jsonpickle')
serializers += ('jsonpickle',)
except ImportError:
pass
return serializers
|
98e47f0c3dd7a6c70ba1ec88a5d1d0ffca8be79e
| 19,677 |
def munge_pocket_response(resp):
"""Munge Pocket Article response."""
articles = resp['list']
result = pd.DataFrame([articles[id] for id in articles])
# only munge if actual articles present
if len(result) != 0:
result['url'] = (result['resolved_url'].combine_first(result['given_url']))
for time_col in ['time_added', 'time_updated', 'time_read']:
result[time_col] = pd.to_datetime(result[time_col], unit='s')
return (
result.drop_duplicates(subset=['resolved_id'])[[
'item_id', 'resolved_id', 'given_title', 'url', 'resolved_title', 'time_added',
'time_read', 'time_updated', 'status', 'word_count'
]]
)
|
32695526a784cc95aeb428ca2481dcf9053e72ed
| 19,678 |
import os
def __abs_path(path):
"""
Creates an absolute path, based on the relative path from the configuration file
:param path: A relative path
:return: The absolute path, based on the configuration file
"""
if not os.path.isabs(path):
parent = os.path.abspath(os.path.join(config_path, os.pardir))
return os.path.abspath(os.path.join(os.path.relpath(parent), path)) + os.path.sep
else:
return path
|
4b790fda478f583c6b78c05e10e01dc68eda7e25
| 19,679 |
import time
def fake_data_PSBL_phot(outdir='', outroot='psbl',
raL=259.5, decL=-29.0,
t0=57000.0, u0_amp=0.8, tE=500.0,
piE_E=0.02, piE_N=0.02,
q=0.5, sep=5.0, phi=75.0, b_sff1=0.5, mag_src1=16.0,
parallax=True, target='Unknown', animate=False):
"""
Optional Inputs
---------------
outdir : str
The output directory where figures and data are saved.
outroot : str
The output file name root for a saved figure.
raL : float (deg)
The right ascension in degrees. Needed if parallax=True.
decL : float (deg)
The declination in degrees. Needed if parallax=False.
t0: float
Time of photometric peak, as seen from Earth [MJD]
u0_amp: float
Angular distance between the lens and source on the plane of the
sky at closest approach in units of thetaE. It can be
positive (u0_hat cross thetaE_hat pointing away from us) or
negative (u0_hat cross thetaE_hat pointing towards us).
tE: float
        Einstein crossing time. [days]
piE_E: float
The microlensing parallax in the East direction in units of thetaE
piE_N: float
The microlensing parallax in the North direction in units of thetaE
q: float
Mass ratio (low-mass / high-mass)
sep: float
Angular separation of the two lenses in units of thetaE where
thetaE is defined with the total binary mass.
phi: float
Angle made between the binary axis and the relative proper motion vector,
measured in degrees.
b_sff: array or list
The ratio of the source flux to the total (source + neighbors + lens)
b_sff = f_S / (f_S + f_L + f_N). This must be passed in as a list or
array, with one entry for each photometric filter.
mag_src: array or list
Photometric magnitude of the source. This must be passed in as a
list or array, with one entry for each photometric filter.
"""
start = time.time()
if parallax:
psbl = model.PSBL_Phot_Par_Param1(t0, u0_amp, tE, piE_E, piE_N, q, sep, phi,
[b_sff1], [mag_src1],
raL=raL, decL=decL, root_tol=1e-8)
else:
psbl = model.PSBL_Phot_noPar_Param1(t0, u0_amp, tE, piE_E, piE_N, q, sep, phi,
[b_sff1], [mag_src1],
root_tol=1e-8)
# Simulate
# photometric observations every 1 day and
# for the bulge observing window. Observations missed
# for 125 days out of 365 days for photometry.
t_pho = np.array([], dtype=float)
for year_start in np.arange(54000, 60000, 365.25):
phot_win = 240.0
phot_start = (365.25 - phot_win) / 2.0
t_pho_new = np.arange(year_start + phot_start,
year_start + phot_start + phot_win, 1)
t_pho = np.concatenate([t_pho, t_pho_new])
t_mod = np.arange(t_pho.min(), t_pho.max(), 1)
i_pho, A_pho = psbl.get_all_arrays(t_pho)
i_mod, A_mod = psbl.get_all_arrays(t_mod)
imag_pho = psbl.get_photometry(t_pho, amp_arr=A_pho)
imag_mod = psbl.get_photometry(t_mod, amp_arr=A_mod)
# Make the photometric observations.
    # Assume 0.05 mag photometric errors at I=19.
# This means Signal = 400 e- at I=19.
flux0 = 400.0
imag0 = 19.0
flux_pho = flux0 * 10 ** ((imag_pho - imag0) / -2.5)
flux_pho_err = flux_pho ** 0.5
flux_pho += np.random.randn(len(t_pho)) * flux_pho_err
imag_pho = -2.5 * np.log10(flux_pho / flux0) + imag0
imag_pho_err = 1.087 / flux_pho_err
stop = time.time()
fmt = 'It took {0:.2f} seconds to evaluate the model at {1:d} time steps'
print(fmt.format(stop - start, len(t_mod) + len(t_pho)))
##########
# Plot photometry
##########
plt.figure(1)
plt.clf()
plt.errorbar(t_pho, imag_pho, yerr=imag_pho_err, fmt='k.', label='Sim Obs',
alpha=0.2)
plt.plot(t_mod, imag_mod, color='red', label='Model')
plt.gca().invert_yaxis()
plt.xlabel('Time (MJD)')
plt.ylabel('I (mag)')
plt.legend()
data = {}
data['t_phot1'] = t_pho
data['mag1'] = imag_pho
data['mag_err1'] = imag_pho_err
data['phot_files'] = ['fake_data_parallax_phot1']
data['ast_files'] = ['fake_data_parallax_ast1']
data['target'] = target
data['phot_data'] = 'sim'
data['ast_data'] = 'sim'
data['raL'] = raL
data['decL'] = decL
params = {}
params['t0'] = t0
params['u0_amp'] = u0_amp
params['tE'] = tE
params['piE_E'] = piE_E
params['piE_N'] = piE_N
params['q'] = q
params['sep'] = sep
params['phi'] = phi
params['b_sff'] = b_sff1
params['mag_src'] = mag_src1
out_name = outdir + outroot + '_movie.gif'
if animate:
ani = plot_models.animate_PSBL(psbl, outfile=out_name)
else:
ani = None
return data, params, psbl, ani
|
bde4d98d0936be3b0cd655879bbfdbde2e1a5826
| 19,680 |
import pathlib
def is_dicom(path: pathlib.Path) -> bool:
"""Check if the input is a DICOM file.
Args:
path (pathlib.Path): Path to the file to check.
Returns:
bool: True if the file is a DICOM file.
"""
path = pathlib.Path(path)
is_dcm = path.suffix.lower() == ".dcm"
is_dcm_dir = path.is_dir() and any(
p.suffix.lower() == ".dcm" for p in path.iterdir()
)
return is_dcm or is_dcm_dir
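# A minimal usage sketch: the file check is purely suffix based, so the paths
# below do not need to exist.
print(is_dicom(pathlib.Path("scan_001.DCM")))  # True (case-insensitive suffix)
print(is_dicom(pathlib.Path("notes.txt")))     # False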
|
1e20ace9c645a41817bf23a667bd4e1ac815f63f
| 19,681 |
from typing import Optional
def _hessian(model: 'BinaryLogReg', data: Dataset, data_weights: Optional[jnp.ndarray]) -> jnp.ndarray:
"""Ravelled Hessian matrix of the objective function with respect to the model parameters"""
params_flat, unravel = ravel_pytree(model.params)
random_params = model.random_params
h = jax.hessian(lambda p: _objective(unravel(p), random_params, data, model.lamb, model.pos_label, data_weights))
return h(params_flat)
|
5824fe0d2def5d03e8ac3f773641d19d6aebfa3e
| 19,682 |
import re
def promax2meta(doc, target):
"""
Return meta information (Line or Area) of csv Promax geometry file.
Arguments:
doc -- csv Promax geometry file
target -- meta information to get (Line or Area)
"""
ptarget = r'' + re.escape(target) + r'\s*[=:]\s*\"?([\w-]+)\"?'
for line in open(doc):
result = (re.search(ptarget, line, re.I))
if result:
return result.group(1)
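# A minimal usage sketch with a hypothetical geometry file: a header line such
# as 'Line = "L-1201"' is matched case-insensitively and yields 'L-1201'.
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as tmp:
    tmp.write('Area = "BLOCK-7"\nLine = "L-1201"\n')
print(promax2meta(tmp.name, "Line"))  # -> L-1201
print(promax2meta(tmp.name, "Area"))  # -> BLOCK-7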
|
7dce362112aa7fb6fa24999c4f870107b24c3d40
| 19,683 |
def axLabel(value, unit):
"""
Return axis label for given strings.
:param value: Value for axis label
:type value: int
:param unit: Unit for axis label
:type unit: str
:return: Axis label as \"<value> (<unit>)\"
:rtype: str
"""
return str(value) + " (" + str(unit) + ")"
|
cc553cf4334222a06ae4a2bcec5ec5acb9668a8f
| 19,684 |
import hashlib
import time
def save_notebook(filename, timeout=10):
"""
Force-saves a Jupyter notebook by displaying JavaScript.
Args:
filename (``str``): path to notebook file being saved
timeout (``int`` or ``float``): number of seconds to wait for save before timing-out
    Returns:
``bool``: whether the notebook was saved successfully
"""
timeout = timeout * 10**9
if get_ipython() is not None:
with open(filename, "rb") as f:
md5 = hashlib.md5(f.read()).hexdigest()
start = time.time_ns()
display(Javascript("Jupyter.notebook.save_checkpoint();"))
curr = md5
while curr == md5 and time.time_ns() - start <= timeout:
time.sleep(1)
with open(filename, "rb") as f:
curr = hashlib.md5(f.read()).hexdigest()
return curr != md5
return True
|
4d02f1eb48459c412a119fcab7d8df7515c1b465
| 19,685 |
def test_gen():
"""Create the test system."""
project_name = "test_grid_sinfactory"
return PFactoryGrid(project_name=project_name).gens["SM1"]
|
cd8009b55bfced7fbafc2914b80e0dd2cd2851fc
| 19,686 |
def validate_api_key():
"""Validates an API key submitted via POST."""
api_key_form = ApiKeyForm()
api_key_form.organization.choices = session['orgs_list']
if api_key_form.validate_on_submit():
session['org_id'] = api_key_form.organization.data
return jsonify(True)
return jsonify(api_key_form.errors), 422
|
1cf72017600222992cb9d622c6b718b8dc84bae8
| 19,687 |
def plot_clickForPlane():
""" Create a Plane at location of one mouse click in the view or
onto a clicked object or
at a pre-selected point location:
Create a Plane perpendicular to the view at location of one mouse click.
- Click first on the Button then click once on the View.
- Click first on the Button then click once on one object of the View
to attach the plane at the object.
But you can also select an already existing point first and click the button
to attach the plane.
"""
msg = verbose
createFolders('WorkPlanes')
m_actDoc = get_ActiveDocument(info=msg)
if m_actDoc.Name is None:
return None
m_selEx = Gui.Selection.getSelectionEx(m_actDoc.Name)
if len(m_selEx) >= 1:
SelectedObjects = get_SelectedObjects(info=1)
Number_of_Points = SelectedObjects[0]
if (Number_of_Points == 1):
Point_List = SelectedObjects[3]
name = "Plane"
part = "Part::Feature"
# return view direction as a vector
Plane_Normal = Gui.ActiveDocument.ActiveView.getViewDirection()
# Set the base of the plane at location of mouse click
Plane_Point = Point_List[-1].Point
# Create a Plane
Plane_User_Name, plane = plot_plane(m_lengthPlane, m_widthPlane, Plane_Point, Plane_Normal, part, name)
else:
printError_msg("Either select first one Point and Click the button or \n" +
"Click the button and one free mouse click in the view or" +
"Click the button and one mouse click on an object of the view !")
else:
global m_callback
#view = Gui.ActiveDocument.ActiveView
view = get_ActiveView()
# m_callback = view.addEventCallbackPivy(SoMouseButtonEvent.getClassTypeId(),getClickedPlane)
m_callback = view.addEventCallback("SoMouseButtonEvent", getClickedPlane2)
|
62acc0e41ce165047f6dabf99d0df8fd2b1db000
| 19,688 |
def is_logged_in(f):
"""
is logged in decorator
"""
@wraps(f)
def wrap(*args, **kwargs):
"""
wrap from template
"""
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please login', 'danger')
return redirect(url_for('login'))
return wrap
|
732bf60bf0901fc341f81c3e6db3516052ecfd12
| 19,689 |
def dataframe_from_mult_files(filenames):
"""@param filenames (List[Str]): list of filenames"""
dfs = []
for filename in filenames:
dfs.append(dataframe_from_file(filename))
return pd.concat(dfs, axis=0)
|
76f37d5cef6a8b44946ef25536a135db339beca6
| 19,690 |
def batch_euclidean_dist(x, y, min_val):
""" euclidean_dist function over batch
x and y are batches of matrices x' and y':
x' = (x'_1, | x'_2 | ... | x'_m).T
y' = (y'_1, | y'_2 | ... | y'_n).T
Where x_i and y_j are vectors. We calculate the distances between each pair x_i and y_j.
    res'[i, j] = dist(x'_i, y'_j)
res (batch of res') will have the shape [batch_size, m, n]
For calculation we use the formula x^2 - 2xy + y^2.
Clipped to prevent zero distances for numerical stability.
"""
_, m, _ = x.shape
_, n, _ = y.shape
# shape [N, m, n]
xx = ops.pows(x, 2).sum(-1, keepdims=True).repeat(n, axis=-1)
yy = ops.pows(y, 2).sum(-1, keepdims=True).repeat(m, axis=-1).transpose(0, 2, 1)
dist = xx + yy
dist = 1 * dist - 2 * ops.batch_dot(x, y.transpose(0, 2, 1))
# Avoiding zeros for numerical stability
dist = ops.maximum(
dist,
min_val,
)
dist = ops.sqrt(dist)
return dist
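# A NumPy reference sketch of the same x^2 - 2xy + y^2 expansion (the `ops`
# backend above is project-specific and not shown here); handy for checking
# the batched result on small inputs.
import numpy as np
def batch_euclidean_dist_np(x, y, min_val=1e-12):
    xx = (x ** 2).sum(-1)[:, :, None]   # [N, m, 1]
    yy = (y ** 2).sum(-1)[:, None, :]   # [N, 1, n]
    xy = x @ y.transpose(0, 2, 1)       # [N, m, n]
    return np.sqrt(np.maximum(xx + yy - 2 * xy, min_val))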
|
1eec65330ba8970fd84c7d9c7c57e91cd79e0e6f
| 19,691 |
def outgroup_reformat(newick, outgroup):
"""
Move the location of the outgroup in a newick string to be at the end of the string
Inputs:
newick --- a newick string to be reformatted
outgroup --- the outgroup
Output:
newick --- the reformatted string
"""
# Replace the outgroup and comma with an empty string
newick = newick.replace(outgroup + ",", "")
newick = newick[:-2] + "," + outgroup + ");"
return newick
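# A minimal worked example: the outgroup 'O' is moved from the front of the
# newick string to the end.
newick = "(O,(A,(B,C)));"
print(outgroup_reformat(newick, "O"))  # ((A,(B,C)),O);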
|
a45be59deb95d7bb61ea82a111d4390e49d4b7a8
| 19,692 |
def get_source_token(request):
"""
Perform token validation for the presqt-source-token header.
Parameters
----------
request : HTTP request object
Returns
-------
Returns the token if the validation is successful.
Raises a custom AuthorizationException error if the validation fails.
"""
# Validate that the proper token exists in the request.
try:
return request.META['HTTP_PRESQT_SOURCE_TOKEN']
except KeyError:
raise PresQTValidationError(
"PresQT Error: 'presqt-source-token' missing in the request headers.",
status.HTTP_400_BAD_REQUEST)
|
db53bba32f8471a17d44fae2d3f44749d5a83c86
| 19,693 |
def read_train_valid(filename):
"""
读取训练或者验证文件
:param filename: 训练集/验证集的文件名字
:return:
返回训练集的文本和标签
其中文本是一个list, 标签是一个list(每个元素为int)
返回示例:['我很开心', '你不是真正的快乐', '一切都是假的], [1, 0, 0]
"""
fp = pd.read_table(filename, sep='\t', error_bad_lines=False)
return fp['review'].tolist(), list(map(int, fp['sentiment'].tolist()))
|
f6990db50453e4dd88f8ecd13e1eb345ab15fc87
| 19,694 |
def weighted_regularization_matrix_from(
regularization_weights: np.ndarray,
pixel_neighbors: np.ndarray,
pixel_neighbors_sizes: np.ndarray,
) -> np.ndarray:
"""
From the pixel-neighbors, setup the regularization matrix using the weighted regularization scheme.
Parameters
----------
regularization_weights
        The regularization weight of each pixel, which governs how much smoothing is applied to that individual pixel.
pixel_neighbors
An array of length (total_pixels) which provides the index of all neighbors of every pixel in
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_sizes
An array of length (total_pixels) which gives the number of neighbors of every pixel in the
Voronoi grid.
Returns
-------
np.ndarray
The regularization matrix computed using an adaptive regularization scheme where the effective regularization
coefficient of every source pixel is different.
"""
pixels = len(regularization_weights)
regularization_matrix = np.zeros(shape=(pixels, pixels))
regularization_weight = regularization_weights ** 2.0
for i in range(pixels):
regularization_matrix[i, i] += 1e-8
for j in range(pixel_neighbors_sizes[i]):
neighbor_index = pixel_neighbors[i, j]
regularization_matrix[i, i] += regularization_weight[neighbor_index]
regularization_matrix[
neighbor_index, neighbor_index
] += regularization_weight[neighbor_index]
regularization_matrix[i, neighbor_index] -= regularization_weight[
neighbor_index
]
regularization_matrix[neighbor_index, i] -= regularization_weight[
neighbor_index
]
return regularization_matrix
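# A minimal worked example (assuming numpy is imported as np, as the function
# above already requires): three pixels in a chain (0-1-2) with unit weights
# give a symmetric, graph-Laplacian-like matrix (plus the 1e-8 diagonal offset):
# [[ 2, -2,  0],
#  [-2,  4, -2],
#  [ 0, -2,  2]]
regularization_weights = np.array([1.0, 1.0, 1.0])
pixel_neighbors = np.array([[1, -1], [0, 2], [1, -1]])
pixel_neighbors_sizes = np.array([1, 2, 1])
print(weighted_regularization_matrix_from(
    regularization_weights, pixel_neighbors, pixel_neighbors_sizes))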
|
ecc6301e327adc608530c933ae769bd92ffcaf84
| 19,695 |
def child_at_time(
self,
search_time,
shallow_search=False,
):
"""Return the child that overlaps with time search_time.
search_time is in the space of self.
If shallow_search is false, will recurse into compositions.
"""
range_map = self.range_of_all_children()
    # find the first item whose end_time_exclusive is after the search_time
first_inside_range = _bisect_left(
seq=self,
tgt=search_time,
key_func=lambda child: range_map[child].end_time_exclusive(),
)
    # find the last item whose start_time is before the search_time
last_in_range = _bisect_right(
seq=self,
tgt=search_time,
key_func=lambda child: range_map[child].start_time,
lower_search_bound=first_inside_range,
)
# limit the search to children who are in the search_range
possible_matches = self[first_inside_range:last_in_range]
result = None
for thing in possible_matches:
if range_map[thing].overlaps(search_time):
result = thing
break
# if the search cannot or should not continue
if (
result is None
or shallow_search
or not hasattr(result, "child_at_time")
):
return result
# before you recurse, you have to transform the time into the
# space of the child
child_search_time = self.transformed_time(search_time, result)
return result.child_at_time(child_search_time, shallow_search)
|
5961a6d20a962b7b698822610bf4cecdf9c33257
| 19,696 |
def weight_variable_truncated_normal(input_dim, output_dim, name=""):
"""Create a weight variable with truncated normal distribution, values
that are more than 2 stddev away from the mean are redrawn."""
initial = tf.truncated_normal([input_dim, output_dim], stddev=0.5)
return tf.Variable(initial, name=name)
|
4c645fc5a914ff99f5b1063f5ecc0b4878481517
| 19,697 |
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = "otherGuest"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = 1
config_spec.memoryMB = 4
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key)
disk_spec = create_virtual_disk_spec(client_factory, 1024, controller_key)
device_config_spec = [controller_spec, disk_spec]
config_spec.deviceChange = device_config_spec
return config_spec
|
6564197d319b87f0a9dd05ae3053de6ebc11cf5c
| 19,698 |
def test_comparison_ops_eq_t():
"""Check the equal-to operator for a truthy result."""
return """
fn main() {
{dest} = 1 == 1;
}
"""
|
1d6562c26b1103deaf3a8eed4bc8d341a4a7d3e0
| 19,699 |