content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def build_asignar_anexos_query(filters, request):
"""
    Build the search query from the given filters.
"""
return filters.buildQuery().filter(ambito__path__istartswith=request.get_perfil().ambito.path).order_by('nombre') | 1a176182fa0559bac56df17f32345d2c4c22b1f1 | 16,900 |
import os
def validate_paths(paths_A, paths_B, strict=True, keys_ds=None):
""" Validate the constructed images path lists are consistent.
Can allow using B/HR and A/LR folders with different amount of images
Parameters:
paths_A (str): the path to domain A
paths_B (str): the path to domain B
keys_ds (list): the paired 'dataroot_' properties names expected in the Dataset.
        strict (bool): If strict = True, will make sure both lists only contain images
            properly paired in the other dataset, otherwise will fill missing image
            paths in LR/A with 'None' to be taken care of later (i.e. with on-the-fly
generation)
Examples of OTF usage:
- If an LR image pair is not found, downscale HR on the fly, else, use the LR
- If all LR are provided and 'lr_downscale' is enabled, randomize use of provided
LR and OTF LR for augmentation
"""
if keys_ds is None: keys_ds = ['LR', 'HR']
if not strict:
assert len(paths_B) >= len(paths_A), \
'{} dataset contains less images than {} dataset - {}, {}.'.format(\
keys_ds[1], keys_ds[0], len(paths_B), len(paths_A))
if len(paths_A) < len(paths_B):
print('{} contains less images than {} dataset - {}, {}. Will generate missing images on the fly.'.format(
keys_ds[0], keys_ds[1], len(paths_A), len(paths_B)))
i=0
tmp_A = []
tmp_B = []
for idx in range(0, len(paths_B)):
B_head, B_tail = os.path.split(paths_B[idx])
if i < len(paths_A):
A_head, A_tail = os.path.split(paths_A[i])
if A_tail == B_tail:
A_img_path = os.path.join(A_head, A_tail)
tmp_A.append(A_img_path)
i+=1
if strict:
B_img_path = os.path.join(B_head, B_tail)
tmp_B.append(B_img_path)
else:
if not strict:
A_img_path = None
tmp_A.append(A_img_path)
else: #if the last image is missing
if not strict:
A_img_path = None
tmp_A.append(A_img_path)
paths_A = tmp_A
paths_B = tmp_B if strict else paths_B
assert len(paths_A) == len(paths_B)
return paths_A, paths_B | 71d7bb123524196618f9e743ad2ceb0d696e7ea3 | 16,901 |
def _median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
>>> median([1, 3, 5])
3
>>> median([1, 3, 5, 7])
4.0
"""
data = sorted(data)
n = len(data)
if n == 0:
raise ValueError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2 | f05a6b067f95fc9e3fc9350b163b3e89c0792814 | 16,902 |
def num_translate(value: str) -> str:
"""переводит числительное с английского на русский """
str_out = NUM_DICT.get(value)
return str_out | 8555556843ea5235f462dbb7b092eaa09168ab0e | 16,903 |
import tensorflow as tf
def get_patch_shape(corpus_file):
"""Gets the patch shape (height, width) from the corpus file.
Args:
corpus_file: Path to a TFRecords file.
Returns:
A tuple (height, width), extracted from the first record.
Raises:
ValueError: if the corpus_file is empty.
"""
example = tf.train.Example()
try:
example.ParseFromString(next(tf.python_io.tf_record_iterator(corpus_file)))
except StopIteration as e:
raise ValueError('corpus_file cannot be empty: %s' % e)
return (example.features.feature['height'].int64_list.value[0],
example.features.feature['width'].int64_list.value[0]) | 054a43d1aa7809b55c57fa0e7574dd43273f4bae | 16,904 |
import re
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_varible.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError as e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes)) | 78ee345ed67d61994245efff8b103d908aa1a7a4 | 16,905 |
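The ordering described in the docstring (every referenced variable appears before the variables that use it) can be illustrated with a small self-contained sketch; the real function delegates to gyp.common.TopologicallySorted, which also performs cycle detection, so the helper below is only a hypothetical stand-in for a tiny example.
import re
def topo_order(env):
    regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
    order, seen = [], set()
    def visit(key):
        if key in seen:
            return
        seen.add(key)
        for dep in regex.findall(env[key]):  # variables referenced by this value
            if dep in env:
                visit(dep)
        order.append(key)                    # emit after all of its dependencies
    for key in env:
        visit(key)
    return order
print(topo_order({'A': '${B}/bin', 'B': '/opt', 'C': '${A}/tool'}))  # ['B', 'A', 'C']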
def get_children_as_dict(parent):
"""For a given parent object, return all children as a dictionary with the childs tag as key"""
child_list = getChildElementsListWithSpecificXpath(parent, "*")
child_dict = {}
for child in child_list:
value = get_children_as_dict(child)
if child.tag not in child_dict:
child_dict[child.tag] = [value] if value != {} else [child.text]
else:
child_dict[child.tag].append(value if value != {} else child.text)
return child_dict | 054d3591a34536c79e0e5b3715dad6e414d29d46 | 16,906 |
from os import path
def load_ascii_font(font_name):
"""
Load ascii font from a txt file.
Parameter
---------
font_name: name of the font (str).
Return
------
font: font face from the file (dic).
Version
-------
Specification: Nicolas Van Bossuyt (v1. 27/02/17)
Notes
-----
Load font in figlet format (http://www.figlet.org).
"""
chars = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_'abcdefghijklmnopqrstuvwxyz{|}~ÄÖÜäöüβ"
font = {}
char_index = 0
current_char = ''
current_char_width = 0
font_path = 'art/%s' % (font_name)
if not path.isfile(font_path):
return None
f = open(font_path, 'r')
for line in f:
current_char_width = len(line.replace('@', '')) - 1
current_char += line.replace('@', '')
if line.endswith('@@\n'):
font[chars[char_index]] = {}
font[chars[char_index]]['text'] = current_char
font[chars[char_index]]['width'] = current_char_width
current_char = ''
char_index += 1
f.close()
return font | bdf91ce0ccdb574587d71ec9283d15cde09f0f0f | 16,907 |
import itertools
import six
import tensorflow as tf
def get_multi_tower_fn(num_gpus, variable_strategy,
model_fn, device_setter_fn, lr_provider):
"""Returns a function that will build the resnet model.
Args:
num_gpus: number of GPUs to use (obviously)
variable_strategy: "GPU" or "CPU"
model_fn: The function providing the model as in
loss, gradvars, preds = model_fn(is_training,
features,
labels,
data_format, params)
lr_provider: a function that takes a tf.train.get_global_step() and returns
a learning rate value for that step
device_setter_fn: A device setter
"""
def _multi_tower_model_fn(features, labels, mode, params):
"""A model function that distributes models amongst towers.
Support single host, one or more GPU training. Parameter distribution can
be either one of the following scheme.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
params: Hyperparameters suitable for tuning
Returns:
A EstimatorSpec object.
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
momentum = params.momentum
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = params.data_format
if not data_format:
if num_gpus == 0:
data_format = 'channels_last'
else:
data_format = 'channels_first'
if num_gpus == 0:
num_devices = 1
device_type = 'cpu'
else:
num_devices = num_gpus
device_type = 'gpu'
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
device_setter = device_setter_fn(
variable_strategy, worker_device, num_gpus)
with tf.variable_scope('neural_network', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds = \
model_fn(is_training,
tower_features[i],
tower_labels[i],
data_format, params)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
learning_rate = lr_provider(tf.train.get_global_step())
loss = tf.reduce_mean(tower_losses, name='loss')
examples_sec_hook = reporting_utils.ExamplesPerSecondHook(
params.train_batch_size, every_n_steps=10)
tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
train_hooks = [logging_hook, examples_sec_hook]
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
if params.sync:
raise ValueError("We don't support parallel processing at the moment.")
# optimizer = tf.train.SyncReplicasOptimizer(
# optimizer, replicas_to_aggregate=num_workers)
# sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
# train_hooks.append(sync_replicas_hook)
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step())
]
# noinspection PyUnboundLocalVariable
train_op.extend(update_ops)
train_op = tf.group(*train_op)
predictions = {
'classes':
tf.concat([p['classes'] for p in tower_preds], axis=0),
'probabilities':
tf.concat([p['probabilities'] for p in tower_preds], axis=0)
}
stacked_labels = tf.concat(labels, axis=0)
metrics = {
'accuracy':
tf.metrics.accuracy(stacked_labels, predictions['classes'])
}
# noinspection PyUnboundLocalVariable
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=train_hooks,
eval_metric_ops=metrics)
return _multi_tower_model_fn | 72e149d25cc6c80a8ea4c5f35f9215cb71aa2a65 | 16,908 |
from typing import Union
def extract_by_css(
content: str, selector: str, *, first: bool = True
) -> Union[str, list]:
"""Extract values from HTML content using CSS selector.
:param content: HTML content
:param selector: CSS selector
:param first: (optional) return first found element or all of them
    :return: value of the 1st found element or empty string if not found; or a list of all found elements
"""
extracted = ScrapySelector(text=content).css(selector).extract()
if first:
result = extracted[0] if len(extracted) > 0 else ""
else:
result = extracted
return result | b03d76c893c0f23da332c9978bedc5c9c408e840 | 16,909 |
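Assuming ScrapySelector is an alias for scrapy.selector.Selector (the alias is not shown in this snippet), usage looks roughly like this sketch:
html = "<ul><li class='item'>a</li><li class='item'>b</li></ul>"
extract_by_css(html, "li.item::text")               # -> "a"
extract_by_css(html, "li.item::text", first=False)  # -> ["a", "b"]
extract_by_css(html, "p::text")                     # -> ""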
def generate_styles():
""" Create custom style rules """
# Set navbar so it's always at the top
css_string = "#navbar-top{background-color: white; z-index: 100;}"
# Set glossdef tip
css_string += "a.tip{text-decoration:none; font-weight:bold; cursor:pointer; color:#2196F3;}"
css_string += "a.tip:hover{position: relative;border-bottom: 1px dashed #2196F3;}"
# Set glossdef span
css_string += "a.tip span{display: none;background-color: white;font-weight: normal;border:1px solid gray;width: 250px;}"
css_string += "a.tip:hover span{display: block;position: absolute;z-index: 100;padding: 5px 15px;}"
return css_string | 44b36321dedba352c6aa30a352c7cd65cca1f79a | 16,910 |
from pathlib import Path
def get_config_file() -> Path:
"""
Get default config file.
"""
return get_project_root()/'data/config/config.yaml' | 92bc2af7e55b424bcf10355790f377c90a73cf9b | 16,911 |
def kilometers_to_miles(dist_km):
"""Converts km distance to miles
PARAMETERS
----------
dist_km : float
Scalar distance in kilometers
RETURNS
-------
dist_mi : float
        Scalar distance in miles
"""
return dist_km / 1.609344 | 61707d483961e92dcd290c7b0cd8ba8f650c7b5b | 16,912 |
def _objc_provider_framework_name(path):
"""Returns the name of the framework from an `objc` provider path.
Args:
path: A path that came from an `objc` provider.
Returns:
A string containing the name of the framework (e.g., `Foo` for `Foo.framework`).
"""
return path.rpartition("/")[2].partition(".")[0] | 607c040a9a9c56a793473ffcba779fc7d7a64ed5 | 16,913 |
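Although this helper appears to come from a Bazel (Starlark) rule set, the expression evaluates identically in plain Python, so the behaviour can be sanity-checked directly:
_objc_provider_framework_name("Frameworks/Foo.framework")  # -> "Foo"
_objc_provider_framework_name("Foo.framework")             # -> "Foo"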
import os
def create(rosdistro_index_url, extend_path, dir, name, build_tool, verbose):
"""Creates a new workspace, saves it, and switches to it if it is the first
workspace.
:param rosdistro_index_url: The rosdistro to use
:param extend_path: Parent workspace to use.
:param dir: Where to create the workspace
:param name: Name of the new workspace.
    :param build_tool: Create the workspace with colcon, instead of catkin_tools
:param verbose: Unused.
"""
# also allow files
if os.path.isfile(rosdistro_index_url):
rosdistro_index_url = 'file://%s' % os.path.realpath(rosdistro_index_url)
try:
urlopen(rosdistro_index_url)
except (ValueError, URLError) as e:
logger.error(e)
return 1
if not os.path.isdir(dir):
logger.error('target path is not a directory')
return 1
enclosing_workspace = find_enclosing_workspace(dir)
if enclosing_workspace:
logger.error("Its not allowed to create a worksapce inside another workspace, other workspace found here:\n%s",
enclosing_workspace)
return 1
# try to guess which build tool to use
if os.path.exists(os.path.join(extend_path, '.catkin')):
build_tool = 'catkin_tools'
else:
build_tool = 'colcon'
if build_tool == 'catkin_tools':
result = create_workspace_with_catkin_tools(extend_path, dir)
elif build_tool == 'colcon':
result = create_workspace_with_colcon(extend_path, dir)
if result:
return result
save(dir, name, verbose)
save_config(dir, rosdistro_index_url=rosdistro_index_url) | 68b00ed62012331893f387a04d83adb6664a8827 | 16,914 |
def sample_publisher(name='EA'):
"""Create and return a sample publisher"""
return Publisher.objects.create(name=name) | da3d859897c9c3a6f98aa9a7950d77d1390a7527 | 16,915 |
def AddGlobalFile(gfile):
"""
Add a global file to the cmd string.
@return string containing knob
"""
string = ''
if gfile:
string = ' --global_file ' + gfile
return string | 70c4bee610766bbea4faf4e463f88ee65f8804f5 | 16,916 |
def get_scale_sequence(scale_0, v_init, a, n_frames):
""" simulates an object's size change from an initial velocity and an acceleration type
"""
scale = scale_0
sequence = [scale]
# TODO
# friction, sinusoidal
for i in range(n_frames-1):
scale = max(0.05, scale + v_init)
if not isinstance(a, str):
v_init = v_init + a
sequence.append(scale)
return sequence | d559672cd2e7b9fa0ff94fb537c48510319a7d53 | 16,917 |
def read_file(filename):
"""Opens the file with the given filename and creates the puzzle in it.
Returns a pair consisting of the puzzle grid and the list of clues. Assumes
that the first line gives the size. Afterwards, the rows and clues are given.
The description of the rows and clues may interleave arbitrarily.
"""
size = 0
out_list = []
rows = []
clues = []
with open(filename, 'r') as file:
for line in file:
line = line.replace('\n', '')
line = split_type(line)
if line[0] == 'SIZE':
size = int(line[1])
elif line[0] == 'ROW':
rows.append(read_row(line[1]))
else:
clues.append(read_clue(line[1]))
return (rows, clues) | 332ea941aef3b484e2083bfcc734d4bc9fd7f62c | 16,918 |
def login_user(request):
"""View to login a new user"""
user = authenticate(username=request.POST['EMail'][:30], password=request.POST['Password'])
if user is not None:
if user.is_active:
login(request, user)
send_email("ROCK ON!!!", "User login - " + user.first_name + " " + user.last_name)
# Redirect to a success page.
return HttpResponse('success')
else:
# Return a 'disabled account' error message
return HttpResponse('Account disabled')
else:
# Return an 'invalid login' error message.
return HttpResponse('Invalid username or password') | ce3c126192df1aeab171438587cdc78a51ebda77 | 16,919 |
def gdf_convex_hull(gdf):
"""
Creates a convex hull around the total extent of a GeoDataFrame.
Used to define a polygon for retrieving geometries within. When calculating
densities for urban blocks we need to retrieve the full extent of e.g.
buildings within the blocks, not crop them to an arbitrary bounding box.
Parameters
----------
gdf : geodataframe
currently accepts a projected gdf
Returns
-------
shapely polygon
"""
### INSERT CHECK FOR CRS HERE?
# project gdf back to geographic coordinates as footprints_from_polygon
# requires it
gdf_temp = ox.projection.project_gdf(gdf, to_latlong=True)
# determine the boundary polygon to fetch buildings within
# buffer originally 0.000225, buffer actually needs to go whole block away
    # to get complete highways, therefore trying 0.001
boundary=gdf_temp.cascaded_union.convex_hull.buffer(0.001)
# NOTE - maybe more efficient to generate boundary first then reproject second?
return boundary | c636e67b77ed312a952e7f4af3b3535c983417e3 | 16,920 |
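The core geometry step (union -> convex hull -> small buffer) can be sketched with plain GeoPandas/Shapely, leaving out the osmnx reprojection that the real function performs; this is only an illustrative minimal example.
import geopandas as gpd
from shapely.geometry import Point
gdf = gpd.GeoDataFrame(geometry=[Point(0, 0), Point(1, 0), Point(0, 1)], crs="EPSG:4326")
boundary = gdf.unary_union.convex_hull.buffer(0.001)
print(boundary.geom_type)  # 'Polygon'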
from typing import Dict
from typing import Any
def room_operating_mode(mode: str) -> Dict[str, Any]:
"""Payload to set operating mode for
:class:`~pymultimatic.model.component.Room`.
"""
return {"operationMode": mode} | df5d1434d5994eca266a3fd06b6a742710bad0eb | 16,921 |
from typing import Dict
from typing import Any
def _validate_options(data: Dict[str, Any]) -> Dict[str, Any]:
"""
Looks up the exporter_type from the data, selects the correct export
options serializer based on the exporter_type and finally validates the data using
that serializer.
:param data: A dict of data to serialize using an exporter options serializer.
:return: validated export options data
"""
option_serializers = table_exporter_registry.get_option_serializer_map()
validated_exporter_type = validate_data(BaseExporterOptionsSerializer, data)
serializer = option_serializers[validated_exporter_type["exporter_type"]]
return validate_data(serializer, data) | 8d380d3052c3e1cd4d859fa46829034ba1cf6860 | 16,922 |
def householder_name (name, rank):
"""Returns if the name conforms to Householder notation.
>>> householder_name('A_1', 2)
True
>>> householder_name('foobar', 1)
False
"""
base, _, _ = split_name(name)
if base in ['0', '1']:
return True
elif rank == 0:
if base in GREEK_ALPHA:
return True
elif rank == 1:
if len(base) == 1 and base.isalpha() and base.islower():
return True
elif rank == 2:
if len(base) == 1 and base.isupper() and base.isalpha():
return True
return False | 63ff3395e065a79b4d5ee76fb3092efa0cb32b2b | 16,923 |
import numpy as np
def calculateDerivatives(x,t,id):
"""
dxdt, x0, id_, x_mean = calculateDerivatives(x,t,id)
Missing data is assumed to be encoded as np.nan
"""
nm = ~np.isnan(t) & ~np.isnan(x) # not missing
id_u = np.unique(id)
id_ = []
dxdt = []
x0 = []
x_mean = []
for k in range(0,len(id_u)):
rowz = id==id_u[k]
rowz = rowz & nm
t_k = t[rowz]
x_k = x[rowz]
if np.sum(rowz)>1:
# Gradient via linear regression
lm = np.polyfit(t_k,x_k,1)
id_.append(id_u[k])
dxdt.append(lm[0])
x0.append(lm[1])
x_mean.append(np.nanmean(x_k))
print('k = {0} \n * n = {1}\n * dx/dt = {2} | x0 = {3} | mean(x) = {4}'.format(k,sum(rowz),dxdt[-1],x0[-1],x_mean[-1]))
#plt.plot(t[rowz],x[rowz],'x')
#plt.plot([min(t[rowz]),max(t[rowz])],[min(t[rowz]),max(t[rowz])]*dxdt[-1] + x0[-1],'-')
#plt.show()
# Remove any nan
dxdt_isnan = np.isnan(dxdt)
x0_isnan = np.isnan(x0)
dxdt = np.delete(dxdt,np.where(dxdt_isnan | x0_isnan)[0])
x0 = np.delete(x0,np.where(dxdt_isnan | x0_isnan)[0])
id_u = np.delete(id_u,np.where(dxdt_isnan | x0_isnan)[0])
return dxdt, x0, id_, x_mean | 86a6e2fc3e50e65ffd162728b79b50ab6ee09a81 | 16,924 |
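A hypothetical call on two subjects with exactly linear trajectories (using the numpy import added above) illustrates the per-subject slope/intercept extraction:
t = np.array([0., 1., 2., 0., 1., 2.])
x = np.array([1., 2., 3., 5., 4., 3.])
ids = np.array([1, 1, 1, 2, 2, 2])
dxdt, x0, id_, x_mean = calculateDerivatives(x, t, ids)
# dxdt ~ [1., -1.], x0 ~ [1., 5.], id_ = [1, 2], x_mean = [2., 4.]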
import requests
import time
def SolveCaptcha(api_key, site_key, url):
"""
    Uses the 2Captcha service to solve Captchas for you.
    Captchas are held in iframes; to solve the captcha, you need a part of the url of the iframe. The iframe is usually
inside a div with id=gRecaptcha. The part of the url we need is the query parameter k, this is called the site_key:
www.google.com/recaptcha/api2/anchor?ar=1&k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9&co=aHR0cHM6Ly93d3cuZGljZS5jb206NDQz&hl=en&v=oqtdXEs9TE9ZUAIhXNz5JBt_&size=normal&cb=rpcg9w84syix
k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
Here the site_key is 6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
You also need to supply the url of the current page you're on.
This function will return a string with the response key from captcha validating the test. This needs to be inserted
into an input field with the id=g-recaptcha-response.
:param api_key: The 2Captcha API key.
:param site_key: The site_key extracted from the Captcha iframe url
:param url: url of the site you're on
:return: The response from captcha validating the test
"""
print("Solving Captcha...")
print("Sending Request...")
request_response = requests.get("https://2captcha.com/in.php?", params={
"googlekey": site_key,
"method": "userrecaptcha",
"pageurl": url,
"key": api_key,
"json": 1,
"invisible": 0,
})
request_response.raise_for_status()
print("Waiting for Response...")
time.sleep(30)
answer_response_json = {'status': 0, 'request': 'CAPCHA_NOT_READY'}
while answer_response_json['request'] == 'CAPCHA_NOT_READY':
answer_response = requests.get("https://2captcha.com/res.php", params={
"key": api_key,
"action": "get",
"id": request_response.json()['request'],
"json": 1
})
answer_response_json = answer_response.json()
print(answer_response_json)
time.sleep(5)
if answer_response_json['status'] == 1:
print("Solved!")
return answer_response_json['request']
elif answer_response_json['request'] == 'ERROR_CAPTCHA_UNSOLVABLE':
raise TimeoutError("ERROR_CAPTCHA_UNSOLVABLE")
else:
raise Exception(answer_response_json['request']) | e610a265d03be65bfd6321a266776a8102c227d0 | 16,925 |
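Typical usage looks like the sketch below; the API key and URL are placeholders, and the site key is the example value from the docstring, not real credentials.
token = SolveCaptcha(
    api_key="YOUR_2CAPTCHA_API_KEY",
    site_key="6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9",  # example site_key from the docstring
    url="https://www.example.com/login",
)
# `token` is then placed in the hidden input with id="g-recaptcha-response".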
import os
import mimetypes
def send_asset(asset_file_name):
"""Return an asset.
Args:
asset_file_name: The path of the asset file relative to the assets folder.
Returns:
The asset specified in the URL.
"""
asset_path = f"assets/{asset_file_name}"
asset_size = os.path.getsize(asset_path)
with open(asset_path, "rb") as asset_file:
asset_etag = RangeRequest.make_etag(asset_file)
asset_response = RangeRequest(
open(asset_path, "rb"), # noqa: WPS515
etag=asset_etag,
last_modified=server_boot_time,
size=asset_size,
).make_response()
asset_response.mimetype = mimetypes.guess_type(asset_file_name)[0]
return asset_response | f259a03d5a579d07a8110656ad5e5f50a533b9ce | 16,926 |
def clean_crn(crn, duplicates = True, trivial = True, inter = None):
"""Takes a crn and removes trivial / duplicate reactions. """
new = []
seen = set()
for [R, P] in crn:
lR = sorted(interpret(R, inter)) if inter else sorted(R)
lP = sorted(interpret(P, inter)) if inter else sorted(P)
tR = tuple(lR)
tP = tuple(lP)
if trivial and tR == tP:
continue
if duplicates and (tR, tP) in seen:
continue
new.append([lR, lP])
seen.add((tR, tP))
return new | 28f4e8eac7b6aea0505491ef55ce54d8d05f0069 | 16,927 |
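A small illustrative call (with no interpretation dict) shows both filters in action:
crn = [[['A', 'B'], ['C']],
       [['B', 'A'], ['C']],   # duplicate of the first reaction once reactants are sorted
       [['C'], ['C']]]        # trivial reaction
clean_crn(crn)                # -> [[['A', 'B'], ['C']]]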
def get_db_mapping(mesh_id):
"""Return mapping to another name space for a MeSH ID, if it exists.
Parameters
----------
mesh_id : str
The MeSH ID whose mappings is to be returned.
Returns
-------
tuple or None
A tuple consisting of a DB namespace and ID for the mapping or None
if not available.
"""
return mesh_to_db.get(mesh_id) | ae3f8de5c93ab0230a7c87edfa2d3996a9c8667b | 16,928 |
import numpy as np
from numpy import exp, log
from scipy.special import eval_laguerre, jv
def MC_dBESQ_gateway(N = 10**6, t = 0, n0 = 0, test = 'laguerre', method = 'laguerre', args = [], num_decimal = 4):
"""
Monte Carlo estimator of expected dBESQ using birth-death simulation, exact BESQ solution, dLaguerre simulation
or PDE systems.
:param N: int, Number of simulations
:param T: positive float, Simulation horizon
:param x0: initial value of X
:param method: simulation method, currently support {'birth-death', 'exact-besq', 'laguerre', 'pde'}
:param test: defines test function
:args: arguments to define test function
"""
if method == 'birth-death':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
xt_array = bd_simulator(t, x0=n0, num_paths=N, method='bessel', num_threads=4)
return np.mean(f(xt_array)).round(num_decimal)
elif method == 'exact-besq':
if test == 'laguerre':
return np.mean(exp(-t+1)*jv(0, 2*np.sqrt(np.random.gamma(n0+1)))).round(num_decimal)
elif method == 'laguerre':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
s = log(t / 2)
def poisson_x0():
return np.random.poisson(np.random.gamma(n0+1))
xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
return np.mean(f(np.random.poisson(t/2 *np.random.gamma(xt_array+1)))).round(num_decimal) | 6d09ca8ef2f772e194c7ae656ec4bf9e8a2b6948 | 16,929 |
import sys
def locate_app(app_id):
"""Attempts to locate the application."""
if app_id is None:
return find_app_in_cwd()
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app | 027b199a602fb81950cd10eea52276035ac9a045 | 16,930 |
def resolve_image(image):
""" Resolve an informal image tag into a full Docker image tag. Any tag
available on Docker Hub for Neo4j can be used, and if no 'neo4j:' prefix
exists, this will be added automatically. The default edition is
Community, unless a cluster is being created in which case Enterprise
edition is selected instead. Explicit selection of Enterprise edition can
be made by adding an '-enterprise' suffix to the image tag.
If a 'file:' URI is passed in here instead of an image tag, the Docker
image will be loaded from that file instead.
Examples of valid tags:
- 3.4.6
- neo4j:3.4.6
- latest
- file:/home/me/image.tar
"""
if image.startswith("file:"):
return load_image_from_file(image[5:])
elif ":" in image:
return image
else:
return "neo4j:" + image | 7d03b936f90c459a2dade179d9e38bd17c8c1af8 | 16,931 |
def _cifar_meanstd_normalize(image):
"""Mean + stddev whitening for CIFAR-10 used in ResNets.
Args:
image: Numpy array or TF Tensor, with values in [0, 255]
Returns:
image: Numpy array or TF Tensor, shifted and scaled by mean/stdev on
CIFAR-10 dataset.
"""
# Channel-wise means and std devs calculated from the CIFAR-10 training set
cifar_means = [125.3, 123.0, 113.9]
cifar_devs = [63.0, 62.1, 66.7]
rescaled_means = [x / 255. for x in cifar_means]
rescaled_devs = [x / 255. for x in cifar_devs]
image = (image - rescaled_means) / rescaled_devs
return image | 286ab555d30fd779c093e3b8801821f8370e1ca8 | 16,932 |
def get_value_counts_and_frequencies(elem: Variable, data: pd.DataFrame) -> Categories:
"""Call function to generate frequencies depending on the variable type
Input:
elem: dict
data: pandas DataFrame
Output:
statistics: OrderedDict
"""
statistics: Categories = Categories()
_scale = elem["scale"]
statistics.update(get_categorical_frequencies(elem, data))
return statistics | 7afe35dc605c1eb25158c8a948eeabcfb0027dc6 | 16,933 |
import numpy as np
from scipy.stats import linregress
def determineLinearRegions(data, minLength=.1, minR2=.96, maxSlopeInterceptDiff=.75):
"""
Determine regions of a plot that are approximately linear by performing
linear least-squares on a rolling window.
Parameters
----------
data : array_like
Data within which linear regions are to be identified
minLength : int or float
The minimum length of a linear segment, either as an
integer number of indices, or as a float fraction of the
overall data length.
minR2 : float
The minimum r-squared value for a region to be
considered linear.
maxSlopeInterceptDiff : float
The float percentage difference allowed between slopes
and intercepts of adjacent slices for them to be
considered the same region.
Returns
-------
regionIndices : np.ndarray[N,2]
The start and end indices for the N detected regions.
slopes : np.ndarray[N]
The slope of each region.
intercepts : np.ndarray[N]
The intercept of each region.
"""
if minLength < 1:
minLinSteps = int(len(data)*minLength)
else:
minLinSteps = int(minLength)
inLinearRegion = False
linearRegions = []
slopes = []
intercepts = []
# Perform least squares on a rolling window
i = 0
while i < len(data) - minLinSteps:
xArr = np.arange(i, i+minLinSteps)
slope, intercept, r2, p_value, std_err = linregress(xArr, data[i:i+minLinSteps])
if np.abs(r2) > minR2:
if inLinearRegion:
# Calculate how different new slope is from old one
if np.abs((np.mean(slopes[-1]) - slope) / np.mean(slopes[-1])) < maxSlopeInterceptDiff and np.abs((np.mean(intercepts[-1]) - intercept) / np.mean(intercepts[-1])) < maxSlopeInterceptDiff:
# This is still the same linear region, so we extend the bounds
linearRegions[-1][1] = i+minLinSteps
# And average in the slopes and intercepts
slopes[-1] += [slope]
intercepts[-1] += [intercept]
else:
# Otherwise, we have a new linear region, which we start
# at the end of the other one
i = linearRegions[-1][1]
inLinearRegion = False
continue
else:
# New linear region
linearRegions.append([i, i+minLinSteps])
slopes.append([slope])
intercepts.append([intercept])
inLinearRegion = True
else:
inLinearRegion = False
i += 1
slopes = np.array([np.mean(s) for s in slopes])
intercepts = np.array([np.mean(inter) for inter in intercepts])
return np.array(linearRegions), slopes, intercepts | 318672634082ae87b18f087e8aee65efc1da3f59 | 16,934 |
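A rough, hypothetical check on synthetic piecewise-linear data (offsets chosen so the intercepts stay well away from zero, since the similarity test divides by the mean intercept):
rising = np.linspace(5.0, 15.0, 100)    # slope ~ +0.10 per index
falling = np.linspace(15.0, 5.0, 100)   # slope ~ -0.10 per index
data = np.concatenate([rising, falling])
regions, slopes, intercepts = determineLinearRegions(data, minLength=0.1)
# expect two regions, with slopes close to +0.10 and -0.10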
import numpy as np
def compute_dispersion(aperture, beam, dispersion_type, dispersion_start,
mean_dispersion_delta, num_pixels, redshift, aperture_low, aperture_high,
weight=1, offset=0, function_type=None, order=None, Pmin=None, Pmax=None,
*coefficients):
"""
Compute a dispersion mapping from a IRAF multi-spec description.
:param aperture:
The aperture number.
:param beam:
The beam number.
:param dispersion_type:
An integer representing the dispersion type:
0: linear dispersion
1: log-linear dispersion
2: non-linear dispersion
:param dispersion_start:
The value of the dispersion at the first physical pixel.
:param mean_dispersion_delta:
The mean difference between dispersion pixels.
:param num_pixels:
The number of pixels.
:param redshift:
The redshift of the object. This is accounted for by adjusting the
dispersion scale without rebinning:
>> dispersion_adjusted = dispersion / (1 + redshift)
:param aperture_low:
The lower limit of the spatial axis used to compute the dispersion.
:param aperture_high:
The upper limit of the spatial axis used to compute the dispersion.
:param weight: [optional]
A multiplier to apply to all dispersion values.
:param offset: [optional]
A zero-point offset to be applied to all the dispersion values.
:param function_type: [optional]
An integer representing the function type to use when a non-linear
dispersion mapping (i.e. `dispersion_type = 2`) has been specified:
1: Chebyshev polynomial
2: Legendre polynomial
3: Cubic spline
4: Linear spline
5: Pixel coordinate array
6: Sampled coordinate array
:param order: [optional]
The order of the Legendre or Chebyshev function supplied.
:param Pmin: [optional]
The minimum pixel value, or lower limit of the range of physical pixel
coordinates.
:param Pmax: [optional]
The maximum pixel value, or upper limit of the range of physical pixel
coordinates.
:param coefficients: [optional]
The `order` number of coefficients that define the Legendre or Chebyshev
polynomial functions.
:returns:
An array containing the computed dispersion values.
"""
if dispersion_type in (0, 1):
# Simple linear or logarithmic spacing
dispersion = \
dispersion_start + np.arange(num_pixels) * mean_dispersion_delta
        if dispersion_type == 1:
            dispersion = 10.**dispersion
elif dispersion_type == 2:
# Non-linear mapping.
if function_type is None:
raise ValueError("function type required for non-linear mapping")
elif function_type not in range(1, 7):
raise ValueError(
"function type {0} not recognised".format(function_type))
if function_type == 1:
order = int(order)
n = np.linspace(-1, 1, Pmax - Pmin + 1)
temp = np.zeros((Pmax - Pmin + 1, order), dtype=float)
temp[:, 0] = 1
temp[:, 1] = n
for i in range(2, order):
temp[:, i] = 2 * n * temp[:, i-1] - temp[:, i-2]
for i in range(0, order):
temp[:, i] *= coefficients[i]
dispersion = temp.sum(axis=1)
elif function_type == 2:
# Legendre polynomial.
if None in (order, Pmin, Pmax, coefficients):
raise TypeError("order, Pmin, Pmax and coefficients required "
"for a Chebyshev or Legendre polynomial")
Pmean = (Pmax + Pmin)/2
Pptp = Pmax - Pmin
x = (np.arange(num_pixels) + 1 - Pmean)/(Pptp/2)
p0 = np.ones(num_pixels)
p1 = mean_dispersion_delta
dispersion = coefficients[0] * p0 + coefficients[1] * p1
for i in range(2, int(order)):
if function_type == 1:
# Chebyshev
p2 = 2 * x * p1 - p0
else:
# Legendre
p2 = ((2*i - 1)*x*p1 - (i - 1)*p0) / i
dispersion += p2 * coefficients[i]
p0, p1 = (p1, p2)
elif function_type == 3:
# Cubic spline.
if None in (order, Pmin, Pmax, coefficients):
raise TypeError("order, Pmin, Pmax and coefficients required "
"for a cubic spline mapping")
s = (np.arange(num_pixels, dtype=float) + 1 - Pmin)/(Pmax - Pmin) \
* order
j = s.astype(int).clip(0, order - 1)
a, b = (j + 1 - s, s - j)
x = np.array([
a**3,
1 + 3*a*(1 + a*b),
1 + 3*b*(1 + a*b),
b**3])
dispersion = np.dot(np.array(coefficients), x.T)
else:
raise NotImplementedError("function type not implemented yet")
else:
raise ValueError(
"dispersion type {0} not recognised".format(dispersion_type))
# Apply redshift correction.
dispersion = weight * (dispersion + offset) / (1 + redshift)
return dispersion | 94fcb70652bad0f2fa26cf73981129f3ae949d8b | 16,935 |
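A minimal linear-dispersion call (dispersion_type=0) is enough to see the output scale; aperture, beam and the aperture limits are bookkeeping arguments that do not affect the result here:
wl = compute_dispersion(aperture=1, beam=1, dispersion_type=0, dispersion_start=4000.0,
                        mean_dispersion_delta=2.0, num_pixels=10, redshift=0.0,
                        aperture_low=1, aperture_high=1)
# wl -> array([4000., 4002., ..., 4018.])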
import numpy as np
def normalize_pcp_area(pcp):
"""
Normalizes a pcp so that the sum of its content is 1,
outputting a pcp with up to 3 decimal points.
"""
pcp = np.divide(pcp, np.sum(pcp))
new_format = []
for item in pcp:
new_format.append(item)
return np.array(new_format) | ea0feeda3f8515b538ae62b08aad09a16ddb2a73 | 16,936 |
import numpy as np
def calc_line_flux(spec, ws, ivar, w0, w1, u_flux):
""" calculate the flux and flux error of the line within the range w0 and w1 using trapz rule"""
u_spec = spec.unit
u_ws = ws.unit
ivar = ivar.to(1./(u_spec**2))
spec_uless = np.array(spec)
ws_uless = np.array(ws)
ivar_uless = np.array(ivar)
if ivar.unit != (1./(spec.unit**2)):
raise Exception("[spector] spec and ivar units inconsistent")
# select region to integrate
select_ws = (ws_uless > w0) & (ws_uless < w1)
ws_sel = ws_uless[select_ws]
spec_sel = spec_uless[select_ws]
ivar_sel = ivar_uless[select_ws]
var_sel = 1./ivar_sel
# integrate
f, fvar = trapz_var(x=ws_sel, y=spec_sel, yvar=var_sel)
f = (f*u_spec*u_ws).to(u_flux)
ferr = (np.sqrt(fvar)*u_spec*u_ws).to(u_flux)
return f, ferr | 78206ce98025ead64e207bd69a8adf1a31178744 | 16,937 |
def boolean(entry, option_key="True/False", **kwargs):
"""
Simplest check in computer logic, right? This will take user input to flick the switch on or off
Args:
entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1.
option_key (str): What kind of Boolean we are setting. What Option is this for?
Returns:
Boolean
"""
error = f"Must enter 0 (false) or 1 (true) for {option_key}. Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled"
if not isinstance(entry, str):
raise ValueError(error)
entry = entry.upper()
if entry in ("1", "TRUE", "ON", "ENABLED", "ENABLE", "YES"):
return True
if entry in ("0", "FALSE", "OFF", "DISABLED", "DISABLE", "NO"):
return False
raise ValueError(error) | d62b36d08651d02719b5866b7798c36efd2a018f | 16,938 |
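Illustrative calls:
boolean("yes", option_key="AutoSave")       # -> True
boolean("Disabled", option_key="AutoSave")  # -> False
boolean("maybe", option_key="AutoSave")     # raises ValueError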
from copy import copy
from itertools import product
from math import ceil, floor
import numpy as np
from tqdm import tqdm
def edge_preserving_filter(ref_map: np.ndarray, guided_image: np.ndarray,
window_size: int, epsilon: float = 1e-10) -> np.ndarray:
"""
Perform edge - preserving filtering on the newly created reference map.
:param ref_map: Classification reference map.
:param guided_image: Guided image as a mean over all bands from hyperspectral data.
:param window_size: Size of the convolving window.
:param epsilon: Regularizer constant.
:return: Improved classification map.
"""
print("Window size = {}".format(window_size))
col_indexes, row_indexes = \
range(0, ref_map.shape[ROW_AXIS], window_size), range(0, ref_map.shape[COLUMNS_AXIS], window_size)
print("Calculating coefficients:")
a_k_map, b_k_map = np.empty(shape=ref_map.shape), np.empty(shape=ref_map.shape)
for i in tqdm(range(ref_map.shape[SPECTRAL_AXIS]), total=ref_map.shape[SPECTRAL_AXIS]):
for row, col in product(col_indexes, row_indexes):
p_k = copy(ref_map[row:row + window_size, col:col + window_size, i])
i_k = copy(guided_image[row:row + window_size, col:col + window_size])
sum_ = np.sum(i_k * p_k - np.mean(i_k) * np.mean(p_k)) / (window_size ** 2)
a_k = sum_ / (np.var(i_k) + epsilon)
b_k = np.mean(p_k) - a_k * np.mean(i_k)
a_k_map[row:row + window_size, col:col + window_size, i] = a_k
b_k_map[row:row + window_size, col:col + window_size, i] = b_k
output_image = np.empty(shape=ref_map.shape)
print("Calculating new \"improved\" classification map:")
for i in tqdm(range(ref_map.shape[SPECTRAL_AXIS]), total=ref_map.shape[SPECTRAL_AXIS]):
for row_index, col_index in product(range(ref_map.shape[ROW_AXIS]), range(ref_map.shape[COLUMNS_AXIS])):
a_k_sum, b_k_sum = 0, 0
row_sub_indexes, col_sub_indexes = \
list(filter(lambda x: 0 <= x < ref_map.shape[ROW_AXIS],
list(range(row_index - floor(window_size / 2),
row_index + ceil(window_size / 2))))), \
list(filter(lambda x: 0 <= x < ref_map.shape[COLUMNS_AXIS],
list(range(col_index - floor(window_size / 2),
col_index + ceil(window_size / 2)))))
for sub_row_idx, sub_col_idx in product(row_sub_indexes, col_sub_indexes):
a_k_sum += a_k_map[sub_row_idx, sub_col_idx, i]
b_k_sum += b_k_map[sub_row_idx, sub_col_idx, i]
a_k_sum, b_k_sum = a_k_sum / (row_sub_indexes.__len__() * col_sub_indexes.__len__()), \
b_k_sum / (row_sub_indexes.__len__() * col_sub_indexes.__len__())
output_image[row_index, col_index, i] = a_k_sum * guided_image[row_index, col_index] + b_k_sum
output_image = np.argmax(output_image, axis=-1) + BG_CLASS
return output_image | 1ca88240c26fd4eae67f869bc95a8b0ce885260b | 16,939 |
from typing import List
def preprocess_annotated_utterance(
annotated_utterance: str,
not_entity: str = NOT_ENTITY,
) -> List[str]:
"""Character Level Entity Label Producer
Named-entity of each character is extracted by XML-like annotation.
Also, they would be collected in a list conform to the order of characters
in the sentence.
Args:
annotated_utterance (a string):
An utterance with annotations looks like <a>blabla</a>.
It is a special format for labeling named-entity in an utterance.
not_entity (a string, default = "DONT_CARE"):
A representation of words that we don't care about.
Returns:
entities (a list of string):
A list of named-entity labels in character level.
Examples:
>>> from ynlu.sdk.evaluation.utils import preprocess_annotated_utterance
>>> preprocess_annotated_utterance(
annotated_utterance="<drink>Coffee</drink>, please.",
not_entity="n",
)
>>> ["drink", "drink", "drink", "drink", "drink", "drink", "n",
"n", "n", "n", "n", "n", "n", "n", "n"]
"""
clean_utterance = remove_annotation(annotated_utterance)
entity_word_pair = FINDALL_PROG.findall(annotated_utterance)
entities = [not_entity] * len(clean_utterance)
begin_index = 0
for entity, word in entity_word_pair:
start_idx = clean_utterance.find(word, begin_index)
if start_idx == -1:
raise ValueError(
"Word {} can not be found in {}".format(word, clean_utterance),
)
entities[start_idx: start_idx + len(word)] = [entity] * len(word)
begin_index = start_idx + len(word)
return entities | b148f19017b97a0f4859abc129cdca50fd187c15 | 16,940 |
from typing import List
from typing import Tuple
import jinja2
def generate_constant_table(
name: str,
constants: List[Constant],
*,
data_type: str = "LREAL",
guid: str = "",
lookup_by_key: bool = False,
**kwargs
) -> Tuple[str, str]:
"""
Generate a GVL constant table, with no interpolation.
Parameters
----------
name : str
The code block name.
constants : list of Constant
Dictionary of name to dataframe.
data_type : str, optional
The data type. Defaults to LREAL.
guid : str, optional
The function block globally unique identifier / GUID.
table_prefix : str, optional
The name with which to prefix all table arrays.
lookup_input : str, optional
The function block input variable name - the indexed parameter which
you're looking up in the table.
lookup_index : int, optional
The per-row array index of the lookup value. Not fully supported
just let; leave this at 0 for now.
row_delta_variable : str, optional
The auto-generated code delta variable. Not necessary to set, unless
you really want to customize the output.
**kwargs :
Additional keyword arguments to pass to or override in the template.
Returns
-------
code : str
The constant table source code.
"""
template_kw = dict(
name=name,
guid=guid or guid_from_string(name),
data_type=data_type,
constants=constants,
)
template_kw.update(kwargs)
template_fn = (
CONSTANT_GVL_LOOKUP_TEMPLATE
if lookup_by_key
else CONSTANT_GVL_TEMPLATE
)
template = jinja2.Template(open(template_fn, "rt").read())
return template.render(template_kw) | 54b491ac3673c68a0e7ef819389e393e921d841f | 16,941 |
def filter_stop_words(text):
"""
Filter all stop words from a string to reduce headline size.
:param text: text to filter
:return: shortened headline
"""
    # `s` is expected to be a module-level collection of stop words
    words = filter(lambda w: w not in s, text.split())
line = ""
l = 0
for w in words:
if l < 20:
line += w + " "
l += 1
else:
return line.strip()
return line.strip() | d27b63018fa8f7b2d072e768c54ce4a056c58ff1 | 16,942 |
def importPublicKey(publickey):
""" Cette fonction permet de exporter la clé public,
elle prend en paramètre use clé public """
return RSA.importKey(publickey) | b744efc95fc154edcf4149134b7b307e75a0bb17 | 16,943 |
def _format_contact(resource, key):
"""
Return the contact field with the correct values.
This is mainly stripping out the unecessary fields from the telecom part of
the response.
"""
contacts = resource.pop(key)
resource[key] = []
for contact in contacts:
contact["telecom"] = _format_telecom(
contact,
"telecom",
add_textphone_extension=False,
whitelist=["id", "use", "period", "extension"]
)
resource[key].append(contact)
return resource[key] | 57291dfdf2a724df2cd2342891aa96309648a9c1 | 16,944 |
import datetime
def get_day_type(date):
"""
Returns if a date is a weeday or weekend
:param date datetime:
:return string:
"""
# check if date is a datetime.date
if not isinstance(date, datetime.date):
raise TypeError('date is not a datetime.date')
day_type = ""
if date.weekday() in (0, 1, 2, 3, 4):
day_type = c.WEEKDAY
else:
day_type = c.WEEKEND
return day_type | 72d74746a7782e0f45b3b7d0292b4cbd4ad9f167 | 16,945 |
def case_insensitive_equals(name1: str, name2: str) -> bool:
"""
Convenience method to check whether two strings match, irrespective of their case and any surrounding whitespace.
"""
return name1.strip().lower() == name2.strip().lower() | 28b7e5bfb5e69cf425e1e8983895f1ad42b59342 | 16,946 |
def get_access_token(cmd, subscription=None, resource=None, scopes=None, resource_type=None, tenant=None):
"""
get AAD token to access to a specified resource.
Use 'az cloud show' command for other Azure resources
"""
if resource is None and resource_type:
endpoints_attr_name = cloud_resource_type_mappings[resource_type]
resource = getattr(cmd.cli_ctx.cloud.endpoints, endpoints_attr_name)
profile = Profile(cli_ctx=cmd.cli_ctx)
creds, subscription, tenant = profile.get_raw_token(subscription=subscription, resource=resource, scopes=scopes,
tenant=tenant)
result = {
'tokenType': creds[0],
'accessToken': creds[1],
# 'expires_on': creds[2].get('expires_on', None),
'expiresOn': creds[2].get('expiresOn', None),
'tenant': tenant
}
if subscription:
result['subscription'] = subscription
return result | 9a5190db41e4061698ead3846a6e53f42e64deed | 16,947 |
def ensure_listable(obj):
"""Ensures obj is a list-like container type"""
return obj if isinstance(obj, (list, tuple, set)) else [obj] | bdc5dbe7e06c1cc13afde28762043ac3fb65e5ac | 16,948 |
def merge_dicts(*dicts: dict) -> dict:
"""Merge dictionaries into first one."""
merged_dict = dicts[0].copy()
for dict_to_merge in dicts[1:]:
for key, value in dict_to_merge.items():
if key not in merged_dict or value == merged_dict[key]:
merged_dict[key] = value
else:
raise ValueError(
f"Test {key} already has a mark we don't want to overwrite: \n"
f"- existing: {merged_dict[key]} "
f"- new value: {value}"
)
merged_dict.update(dict_to_merge)
return merged_dict | b32a9f4bed149144a3f75b43ed45c8de4351f3d1 | 16,949 |
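Illustrative behaviour: non-conflicting keys are merged, while a key that maps to two different values raises:
merge_dicts({"a": 1}, {"b": 2}, {"a": 1, "c": 3})  # -> {'a': 1, 'b': 2, 'c': 3}
merge_dicts({"a": 1}, {"a": 2})                    # raises ValueError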
X, O = "X", "O"  # assumed player-mark constants; the original `from re import X` was a spurious auto-import
def winner(board):
"""
Returns the winner of the game, if there is one.
"""
for moves in _winner_moves():
if all(board[i][j] is X for (i, j) in moves):
return X
elif all(board[i][j] is O for (i, j) in moves):
return O
return None | c6e3b35b2cf37ff3da4fe5cd306f7a6f78603f16 | 16,950 |
def specified_kwargs(draw, *keys_values_defaults: KVD):
"""Generates valid kwargs given expected defaults.
When we can't realistically use hh.kwargs() and thus test whether xp infact
defaults correctly, this strategy lets us remove generated arguments if they
are of the default value anyway.
"""
kw = {}
for keyword, value, default in keys_values_defaults:
if value is not default or draw(booleans()):
kw[keyword] = value
return kw | bd3dfdcbb084a87b0c60d9221692f8fbd3e70333 | 16,951 |
def add_image():
"""User uploads a new landmark image, and inserts into db."""
imageURL = request.form.get("imageURL")
landmark_id = request.form.get("landmark_id")
new_image = LandmarkImage(landmark_id=landmark_id,
imageurl=imageURL)
db.session.add(new_image)
db.session.commit()
return "Success" | dce5f9c21daef67b1a13b1d590fe027213c408e0 | 16,952 |
def merge(link1: Node, link2: Node) -> Node:
"""
Merge two linklists.
Parameters
-----------
link1: Node
link2: Node
Returns
---------
out: Node
Notes
------
"""
link = Node(None)
ptr = link
while link1 and link2:
if link1.val <= link2.val:
ptr.next = link1 #Node(link1.val)
ptr = ptr.next
link1 = link1.next
else:
ptr.next = link2 #Node(link2.val)
ptr = ptr.next
link2 = link2.next
while link1:
ptr.next = Node(link1.val)
ptr = ptr.next
link1 = link1.next
while link2:
ptr.next = Node(link2.val)
ptr = ptr.next
link2 = link2.next
return link.next | 5d40acbd1ffb595a7f4605c3181ede19fb4adbb3 | 16,953 |
def l1_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l1-norm optimization problem."""
cvx.solvers.options['show_progress'] = not CVX_SUPRESS_PRINT
# Non-Weighted optimization:
if w_i is None:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(a_i)], [cvx.matrix(b_i)]])
q = cvx.matrix(c_i * -1)
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# Weighted optimization:
else:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(np.multiply(a_i, w_i))],
[cvx.matrix(np.multiply(b_i, w_i))]])
q = cvx.matrix(np.multiply(w_i, c_i * -1))
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# return resulting point
return (x0, y0) | 0966516185c99b936a1fcc8b3d4c74e67587bc63 | 16,954 |
def set_order(market, order_type, amount, price, keys, stop_price=None):
"""
Create an order
Arguments:
market (str) : market name,
order_type (str) : may be "limit", "market", "market_by_quote",
"limit_stop_loss"
amount (float) : positive if BUY order, and negative for SELL
price (float) : price of 1 ask currency in a quoted currency. Necessary only
when type is "limit"
keys (dict): {
"private" : "",
"public" : ""
}
Optional arguments:
stop_price (float) : price when activates "limit_stop_loss" type order. If
None then the same as price
Returns:
(list) [
[0] (int) order ID,
[1] (NoneType) not in use,
[2] (NoneType) not in use,
[3] (str) name of the market,
[4] (int) time stamp of the creation in ms,
[5] (int) time stamp of the update in ms,
[6] (str) initial volume,
[7] (str) order volume,
[8] (str) order type ("LIMIT" or "MARKET"),
[9] (NoneType) not in use,
[10] (NoneType) not in use,
[11] (NoneType) not in use,
[12] (NoneType) not in use,
[13] (str) order status,
[14] (NoneType) not in use,
[15] (NoneType) not in use,
[16] (str) order price,
[17] (str) average price of deals in order,
[18] (NoneType) not is use,
[19] (str) for stop price but None for other orders,
[20] (NoneType) not in use,
[21] (NoneType) not in use,
[22] (NoneType) not in use,
[23] (NoneType) not in use,
[24] (NoneType) not in use,
[25] (NoneType) not in use,
[26] (NoneType) not in use,
[27] (NoneType) not in use,
[28] (NoneType) not in use,
[29] (NoneType) not in use,
[30] (NoneType) not in use,
[31] (NoneType) not in use,
]
"""
body = {
"symbol": market,
"type": order_type,
"amount": amount,
"price": price,
"stop_price": price,
}
return _request("auth/w/order/submit", body=body, keys=keys) | 8f9823be8a39d404062114432604c8480aed20c6 | 16,955 |
import calendar
def convert_ts(tt):
"""
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
"""
try:
ts = calendar.timegm(tt)
"""
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
"""
if ts < 0:
ts = 0
except TypeError:
ts = None
return ts | a3c2f5ae3d556290b6124d60fd4f84c1c2685195 | 16,956 |
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | fef62699a5a16385ffeeb47f252c1a3142fa9c96 | 16,957 |
import re
import json
def receive_github_hook(request):
"""a hook is sent on some set of events, specifically:
push/deploy: indicates that the content for the repository changed
pull_request: there is an update to a pull request.
This function checks that (globally) the event is valid, and if
so, runs a function depending on the event.
"""
# We do these checks again for sanity
if request.method == "POST":
if DISABLE_WEBHOOKS:
return JsonResponseMessage(message="Webhooks disabled")
if not re.search("GitHub-Hookshot", request.META["HTTP_USER_AGENT"]):
return JsonResponseMessage(message="Agent not allowed")
# Only allow application/json content type
if request.META["CONTENT_TYPE"] != "application/json":
return JsonResponseMessage(message="Incorrect content type")
# Check that it's coming from the right place
required_headers = ["HTTP_X_GITHUB_DELIVERY", "HTTP_X_GITHUB_EVENT"]
if not check_headers(request, required_headers):
return JsonResponseMessage(message="Agent not allowed")
# Has to be a push, deployment, or pull_request
event = request.META["HTTP_X_GITHUB_EVENT"]
# Ping happens on setup
if event == "ping":
return JsonResponseMessage(
message="Ping received, no action taken.", status=200
)
# But don't allow types beyond push, deploy, pr
if event not in ["push", "deployment", "pull_request", "repository"]:
return JsonResponseMessage(message="Incorrect delivery method.")
# A signature is also required
signature = request.META.get("HTTP_X_HUB_SIGNATURE")
if not signature:
return JsonResponseMessage(message="Missing credentials.")
# Parse the body
payload = load_body(request)
repo = payload.get("repository")
repo_name = repo["full_name"]
# If it's a repository event, might be transferred or renamed
if event == "repository":
if payload.get("action") == "transferred":
owner = payload["changes"]["owner"]["from"]["user"]["login"]
repo_name = "%s/%s" % (owner, repo.get("name"))
# Retrieve the article
try:
article = Article.objects.get(repo__full_name=repo_name)
except Article.DoesNotExist:
return JsonResponseMessage(message="Article not found", status=404)
# Don't continue if the repository is archived (this shouldn't happen)
if article.archived:
return JsonResponseMessage(message="Repository is archived.")
# Validate the payload with the collection secret
status = validate_payload(
secret=str(article.secret),
payload=request.body,
request_signature=signature,
)
if not status:
return JsonResponseMessage(message="Invalid credentials.")
# Branch must be master
branch = payload.get("ref", "refs/heads/master").replace("refs/heads/", "")
# Update repo metadata that might change
article.repo = repo
article.save()
# Submit job with django_rq to update article
if event == "pull_request":
against_branch = payload["pull_request"]["base"]["ref"]
branch = payload["pull_request"]["head"]["ref"]
if not branch.startswith("update/term") or against_branch != "master":
return JsonResponseMessage(message="Ignoring branch.", status=200)
# Requesting user is derived from branch
user = branch.replace("update/term-", "").split("-")[0]
res = django_rq.enqueue(
update_pullrequest,
article_uuid=article.uuid,
user=user,
action=payload["action"],
url=payload["pull_request"]["html_url"],
number=payload["number"],
merged_at=payload["pull_request"]["merged_at"],
)
elif event in ["push", "deployment"]:
if branch != "master":
return JsonResponseMessage(message="Ignoring branch.", status=200)
article.commit = payload["after"]
article.save()
res = django_rq.enqueue(update_article, article_uuid=article.uuid)
elif event == "repository":
res = django_rq.enqueue(
repository_change,
article_uuid=article.uuid,
action=payload["action"],
repo=json.dumps(payload["repository"]),
)
return JsonResponseMessage(
message="Hook received and parsing.", status=200, status_message="Received"
)
return JsonResponseMessage(message="Invalid request.") | 874a71c3c9c002f3714ce0b4bb80586e8d67d7e8 | 16,958 |
import torch
def dice(y, t, normalize=True, class_weight=None,
ignore_label=-1, reduce='mean', eps=1e-08):
""" Differentable Dice coefficient.
See: https://arxiv.org/pdf/1606.04797.pdf
Args:
y (~torch.Tensor): Probability
t (~torch.Tensor): Ground-truth label
normalize (bool, optional): If True, calculate the dice coefficients for each class and take the average. Defaults to True.
class_weight (list or ndarray, optional): Defaults to None.
ignore_label (int, optional): Defaults to -1.
reduce (str, optional): Defaults to 'mean'.
eps (float, optional): Defaults to 1e-08.
"""
_check_type_forward(y, t)
device = y.device
dtype = y.dtype
if class_weight is not None:
class_weight = torch.as_tensor(class_weight, dtype=dtype, device=device)
b, c = y.shape[:2]
t_onehot = to_onehot(t, n_class=c)
y = y.view(b, c, -1)
t_onehot = t_onehot.view(b, c, -1)
if ignore_label != -1:
t_onehot = torch.cat( (t_onehot[:, :ignore_label], t_onehot[:, ignore_label + 1:]), dim=1)
y = torch.cat( (y[:, :ignore_label], y[:, ignore_label + 1:]), dim=1)
intersection = y * t_onehot
cardinality = y + t_onehot
if normalize: # NOTE: channel-wise
intersection = torch.sum(intersection, dim=-1)
cardinality = torch.sum(cardinality, dim=-1)
ret = (2. * intersection / (cardinality + eps))
if class_weight is not None:
ret *= class_weight
ret = torch.mean(ret, dim=1)
else:
intersection = torch.sum(intersection, dim=(0, 2))
cardinality = torch.sum(cardinality, dim=(0, 2))
ret = (2. * intersection / (cardinality + eps))
if class_weight is not None:
ret *= class_weight
if reduce == 'mean':
ret = torch.mean(ret)
else:
raise NotImplementedError('unsupported reduce type..')
return ret | dd0b6fb75688ed0579a3bf9a513f73d0b785e57e | 16,959 |
def read_start_params(path_or_database):
"""Load the start parameters DataFrame.
Args:
path_or_database (pathlib.Path, str or sqlalchemy.MetaData)
Returns:
params (pd.DataFrame): see :ref:`params`.
"""
database = load_database(**_process_path_or_database(path_or_database))
optimization_problem = read_last_rows(
database=database,
table_name="optimization_problem",
n_rows=1,
return_type="dict_of_lists",
)
start_params = optimization_problem["params"][0]
return start_params | 31cc6d5f538a8616f9eda676e4bf8757f02f1cb3 | 16,960 |
def load_single_rec_into_tables_obj(src_dbreq,
schema_engine,
psql_schema,
rec_id):
""" Return Tables obj loaded from postgres. """
if len(psql_schema):
psql_schema += '.'
tables = create_tables_load_bson_data(schema_engine, None)
# fetch mongo rec by id from source psql
ext_tables_data = {}
for table_name, table in tables.tables.iteritems():
id_name, quotes = parent_id_name_and_quotes_for_table(table)
if quotes:
id_val = "'" + str(rec_id) + "'"
else:
id_val = rec_id
indexes = [name \
for name in table.sql_column_names \
if table.sql_columns[name].index_key()]
idx_order_by = ''
if len(indexes):
idx_order_by = "ORDER BY " + ','.join(indexes)
select_fmt = 'SELECT * FROM {schema}"{table}" \
WHERE {id_name}={id_val} {idx_order_by};'
select_req = select_fmt.format(schema=psql_schema,
table=table_name,
id_name=id_name,
id_val=id_val,
idx_order_by=idx_order_by)
getLogger(__name__).debug("Get psql data: "+select_req)
src_dbreq.cursor.execute(select_req)
ext_tables_data[table_name] = []
idx = 0
for record in src_dbreq.cursor:
record_decoded = []
if type(record) is tuple:
for titem in record:
if type(titem) is str:
record_decoded.append(titem.decode('utf-8'))
else:
record_decoded.append(titem)
record = tuple(record_decoded)
getLogger(__name__).debug("result[%d]=%s", idx, record)
ext_tables_data[table_name].append(record)
idx += 1
# set external tables data to Tables
tables.load_external_tables_data(ext_tables_data)
return tables | bba0f407b2b406ff454b00e4647b6cb29dd000fd | 16,961 |
def calcCovariance(modes):
"""Return covariance matrix calculated for given *modes*."""
if isinstance(modes, Mode):
array = modes._getArray()
return np.outer(array, array) * modes.getVariance()
elif isinstance(modes, ModeSet):
array = modes._getArray()
return np.dot(array, np.dot(np.diag(modes.getVariances()), array.T))
elif isinstance(modes, NMA):
return modes.getCovariance()
else:
raise TypeError('modes must be a Mode, NMA, or ModeSet instance') | 7803e765dcf4ad40158040013691bd0f3d7775be | 16,962 |
def sparse_tensor_value_to_texts(value):
"""
Given a :class:`tf.SparseTensor` ``value``, return an array of Python strings
representing its values.
This function has been modified from Mozilla DeepSpeech:
https://github.com/mozilla/DeepSpeech/blob/master/util/text.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
return sparse_tuple_to_texts((value.indices, value.values, value.dense_shape)) | e1133532ecd88478d9a5a96773e413992e6566f8 | 16,963 |
def coding_problem_45(rand5):
"""
    Using a function rand5() that returns an integer from 1 to 5 (inclusive) with uniform probability, implement a
    function rand7() that returns an integer from 1 to 7 (inclusive). This implementation works with the 0-based
    equivalents: rand5() yields 0..4 and rand7() yields 0..6, matching the doctest below.
    Note: accumulating 24 uniform base-5 digits modulo 7 is an essentially unbiased implementation of rand7(),
    since the residual bias of 5 ** 24 % 7 == 1 is on the order of 1 / 5 ** 24. To avoid having to rely on big
    integers, we use the property (a + b) % n == ((a % n) + b) % n, which is easy to prove by decomposing a into
    a // n * n + a % n.
>>> from random import randint
>>> rand5 = lambda: randint(0, 4)
>>> rand7 = coding_problem_45(rand5)
>>> 0 <= rand7 < 7
True
"""
rand7 = 0
for _ in range(24):
rand7 = (rand7 * 5 + rand5()) % 7
return rand7 | 8e26c6f95d953e0a8b3d8a33232c886742a535ce | 16,964 |
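A quick empirical sanity check of the construction above; this driver is hypothetical (not part of the original snippet) and follows the doctest's 0-based convention.

from collections import Counter
from random import randint

rand5 = lambda: randint(0, 4)
counts = Counter(coding_problem_45(rand5) for _ in range(70_000))
print(sorted(counts.items()))  # each value in 0..6 appears roughly 10_000 times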
def handle_rss_api(output, kwargs):
""" Special handler for API-call 'set_config' [rss] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if not name:
return None
feed = config.get_config('rss', name)
if feed:
feed.set_dict(kwargs)
else:
config.ConfigRSS(name, kwargs)
action = kwargs.get('filter_action')
if action in ('add', 'update'):
# Use the general function, but catch the redirect-raise
try:
kwargs['feed'] = name
sabnzbd.interface.ConfigRss('/').internal_upd_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
elif action == 'delete':
# Use the general function, but catch the redirect-raise
try:
kwargs['feed'] = name
sabnzbd.interface.ConfigRss('/').internal_del_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
return name | 73bd10dc2a40cc1648423372e8fcae065e83dbce | 16,965 |
def progress(job_id, user: User = Depends(auth_user), db: Session = Depends(get_db)):
"""
Get a user's progress on a specific job.
"""
job = _job(db, job_id)
check_job_user(db, user, job)
progress = rules.get_progress_report(db, job, user)
return progress | fc581297463bc46ce461811d7f675cc99ee63b65 | 16,966 |
import json
def getRoom(borough):
"""Return a JSON dataset for property type of airbnb listing"""
prpt = db.session.query(data.Borough,
data.Room_Type, data.Price, data.Review_Rating, data.review_scores_cleanliness,
data.review_scores_value, data.host_response_rate).statement
df = pd.read_sql_query(prpt, db.session.bind)
df = df[df['Borough'] == borough]
df["host_response_rate"] = df["host_response_rate"].str.replace("%", "").astype(float)
df["review_scores_cleanliness"] = df["review_scores_cleanliness"].str.replace(".", "").astype(float)
df["review_scores_value"] = df["review_scores_value"].str.replace(".", "").astype(float)
df1 = df.groupby('Room_Type').count().reset_index()
df2 = df.groupby('Room_Type').mean().reset_index().round(2)
df = pd.merge(df1, df2, on='Room_Type')
df = df[['Room_Type', 'Borough', 'Price_y', 'Review_Rating_y', 'review_scores_cleanliness_y', 'review_scores_value_y', 'host_response_rate_y']].rename(
columns={'Price_y': 'Avg_price', 'Review_Rating_y':'RRate', 'review_scores_cleanliness_y':'RClean', 'review_scores_value_y':'RValue', 'host_response_rate_y':'HostResponseR' })
df['percent'] = round((df.Borough/df.Borough.sum())*100, 2)
d = df.to_dict('records')
return json.dumps(d) | 2ae05f1f5a501b0a8e25dfc7b212cb0aeecbc0f1 | 16,967 |
def get_info(name_file, what='V', parent_folder='txt_files'):
"""Get data from txt file and convert to data list
:param name_file : name of the file, without txt extension
:param what : V = vertices, E = edges, R = pose
:param parent_folder"""
file_path = get_file_path(name_file, parent_folder)
if what == 'V' or what == 'R':
my_type = 'float'
else:
my_type = 'int'
data_dict = read_data_txt(file_path, my_type)
data = as_list(data_dict)
return data | f06608340622c7173dffabb6c08f178b9e887e73 | 16,968 |
def receive_message(
sock, operation, request_id, max_message_size=MAX_MESSAGE_SIZE):
"""Receive a raw BSON message or raise socket.error."""
header = _receive_data_on_socket(sock, 16)
length = _UNPACK_INT(header[:4])[0]
actual_op = _UNPACK_INT(header[12:])[0]
if operation != actual_op:
raise ProtocolError("Got opcode %r but expected "
"%r" % (actual_op, operation))
# No request_id for exhaust cursor "getMore".
if request_id is not None:
response_id = _UNPACK_INT(header[8:12])[0]
if request_id != response_id:
raise ProtocolError("Got response id %r but expected "
"%r" % (response_id, request_id))
if length <= 16:
raise ProtocolError("Message length (%r) not longer than standard "
"message header size (16)" % (length,))
if length > max_message_size:
raise ProtocolError("Message length (%r) is larger than server max "
"message size (%r)" % (length, max_message_size))
return _receive_data_on_socket(sock, length - 16) | 0c1cd762a2a0889d2894993e0f5362e0acdaee36 | 16,969 |
def cycle_interval(starting_value, num_frames, min_val, max_val):
"""Cycles through the state space in a single cycle."""
starting_in_01 = (starting_value - min_val) / (max_val - min_val)
grid = np.linspace(starting_in_01, starting_in_01 + 2.,
num=num_frames, endpoint=False)
grid -= np.maximum(0, 2 * grid - 2)
grid += np.maximum(0, -2 * grid)
return grid * (max_val - min_val) + min_val | 34fa0d60b9d5d99eee9666d70c77e4375d37ace8 | 16,970 |
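A minimal usage sketch, assuming the snippet's `np` is numpy imported as below; the latent range [-2, 2] and frame count are made-up illustration values.

import numpy as np

frames = cycle_interval(starting_value=0.0, num_frames=8, min_val=-2.0, max_val=2.0)
# frames == [0., 1., 2., 1., 0., -1., -2., -1.]: one full sweep up to max_val,
# down through min_val, and back toward the starting value.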
def commit_ref_info(repos, skip_invalid=False):
"""
Returns a dict of information about what commit should be tagged in each repo.
If the information in the passed-in dictionary is invalid in any way,
this function will throw an error unless `skip_invalid` is set to True,
in which case the invalid information will simply be logged and ignored.
Arguments:
repos (dict): A dict mapping Repository objects to openedx.yaml data.
skip_invalid (bool): if true, log invalid data in `repos`, but keep going.
Returns:
A dict mapping Repositories to a dict about the ref to tag, like this::
{
Repository(<full_repo_name>): {
"ref": "name of tag or branch"
"ref_type": "tag", # or "branch"
"sha": "1234566789abcdef",
"message": "The commit message"
"author": {
"name": "author's name",
"email": "author's email"
}
"committer": {
"name": "committer's name",
"email": "committer's email",
}
},
Repository(<next_repo_name>): {...},
...
}
"""
ref_info = {}
for repo, repo_data in nice_tqdm(repos.items(), desc='Find commits'):
# are we specifying a ref?
ref = repo_data["openedx-release"].get("ref")
if ref:
try:
ref_info[repo] = get_latest_commit_for_ref(repo, ref)
except (GitHubError, ValueError):
if skip_invalid:
msg = "Invalid ref {ref} in repo {repo}".format(
ref=ref,
repo=repo.full_name
)
log.error(msg)
continue
else:
raise
return ref_info | 86425248cd4a90aa75d03c2965d63aba3a38e81d | 16,971 |
def function(x, axis=0, fast=False):
"""
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2**np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
    return acf / acf[tuple(m)] | cb71d63ee35adde701eba91e068b0f6898005f04 | 16,972
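Usage sketch on a synthetic AR(1) series; assumes the snippet's `np` is numpy, imported as below.

import numpy as np

rng = np.random.default_rng(0)
x = np.zeros(4096)
for i in range(1, len(x)):
    x[i] = 0.9 * x[i - 1] + rng.normal()
acf = function(x, fast=True)
# acf[0] == 1.0 by normalization; acf[k] decays roughly like 0.9 ** k.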
def find_binaries(*args, **kwargs):
"""Given images data, return a list of dicts containing details of
all binaries in the image which can be identified with image_id or
image_tag.
One of image_id or image_tag must be specified.
:params: See `find_image`
:exception: exceptions.ImageNotFound
:exception: exceptions.ParameterError
:exception: exceptions.NoPackages
:return: A list of dicts:
As per the Twistlock API, each dict takes the form:
{
name: 'binary name',
path: 'full path to the binary including the name'
md5: 'md5 hash for the binary'
cveCount: 'Number of CVEs reported for the binary'
}
"""
image = find_image(*args, **kwargs)
return image['data']['binaries'] | 0bc4345279f11cc751f4aee62a7773f0fa21643a | 16,973 |
import copy
def solve_version(d):
""" solve version difference,
argument map d is deepcopied.
"""
# make copy
d = copy.deepcopy(d)
v = d.get('version', 0)
# functions in _update
for f in _update_chain[v:]:
d = f(d)
return d | ce6a412cc2350a3f6c97c7e1f649a537c35f6722 | 16,974 |
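A hypothetical illustration of the `_update_chain` contract assumed by `solve_version`: each entry migrates a map one version forward. The migration below is made up; in the real module `_update_chain` already exists.

def _v0_to_v1(d):
    d.setdefault("new_field", None)  # made-up migration step
    d["version"] = 1
    return d

_update_chain = [_v0_to_v1]
upgraded = solve_version({"version": 0, "value": 42})
# upgraded == {"version": 1, "value": 42, "new_field": None}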
from localstack.services.es import es_api
def start_elasticsearch_service(port=None, asynchronous=False):
"""
    Starts the ElasticSearch management API (not the actual elasticsearch process).
"""
port = port or config.PORT_ES
return start_local_api("ES", port, api="es", method=es_api.serve, asynchronous=asynchronous) | 0af83d283735ad1bfdd0684bf1bc1ff36e42d727 | 16,975 |
def regexp_ilike(expr, pattern):
"""
---------------------------------------------------------------------------
Returns true if the string contains a match for the regular expression.
Parameters
----------
expr: object
Expression.
pattern: object
A string containing the regular expression to match against the string.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql("REGEXP_ILIKE({}, {})".format(expr, pattern)) | fe8e7c9a38b5379d265651d60d80bd8804219842 | 16,976 |
import os
def extract_frames(width, height, video_filename, video_path, frames_dir, overwrite=False, start=-1, end=-1, every=1):
"""
Extract frames from a video using decord's VideoReader
    :param width: output frame width in pixels
    :param height: output frame height in pixels
    :param video_filename: base name used for the saved frame image files
    :param video_path: path of the video
    :param frames_dir: the directory to save the frames
    :param overwrite: whether to overwrite frames that already exist
:param start: start frame
:param end: end frame
:param every: frame spacing
:return: count of images saved
"""
video_path = os.path.normpath(video_path) # make the paths OS (Windows) compatible
frames_dir = os.path.normpath(frames_dir) # make the paths OS (Windows) compatible
video_dir, _ = os.path.split(video_path) # get the video path and filename from the path
assert os.path.exists(video_path) # assert the video file exists
# load the VideoReader
vr = VideoReader(video_path, ctx=cpu(0)) # can set to cpu or gpu .. ctx=gpu(0)
if start < 0: # if start isn't specified lets assume 0
start = 0
if end < 0: # if end isn't specified assume the end of the video
end = len(vr)
frames_list = list(range(start, end, every))
saved_count = 0
if every > 25 and len(frames_list) < 1000: # this is faster for every > 25 frames and can fit in memory
frames = vr.get_batch(frames_list).asnumpy()
for index, frame in zip(frames_list, frames): # lets loop through the frames until the end
save_path = os.path.join(frames_dir, "{}_{}.jpg".format(video_filename, index)) # create the save path
if not os.path.exists(save_path) or overwrite: # if it doesn't exist or we want to overwrite anyways
cv2.imwrite(
save_path,
cv2.resize(
                        cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),  # frame is already a numpy array in this branch
(width, height),
interpolation=cv2.INTER_CUBIC)) # save the extracted image
saved_count += 1 # increment our counter by one
else: # this is faster for every <25 and consumes small memory
for index in range(start, end): # lets loop through the frames until the end
frame = vr[index] # read an image from the capture
if index % every == 0: # if this is a frame we want to write out based on the 'every' argument
save_path = os.path.join(frames_dir, "{}_{}.jpg".format(video_filename, index)) # create the save path
if not os.path.exists(save_path) or overwrite: # if it doesn't exist or we want to overwrite anyways
cv2.imwrite(
save_path,
cv2.resize(
cv2.cvtColor(frame.asnumpy(), cv2.COLOR_RGB2BGR),
(width, height),
interpolation=cv2.INTER_CUBIC)) # save the extracted image
saved_count += 1 # increment our counter by one
return saved_count | 64ffb491c7d5f07fa95c05086422d6b3d0f4c927 | 16,977 |
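Hypothetical usage; the paths, sizes and stride below are made up, and both the video file and the frames directory are assumed to already exist.

count = extract_frames(
    width=640, height=360,
    video_filename="clip01", video_path="videos/clip01.mp4",
    frames_dir="frames", every=30,
)
print(f"saved {count} frames")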
def make_tree(path):
"""Higher level function to be used with cache."""
return _make_tree(path) | 26919144c49f238c78a29ff4c2ce91d5da939484 | 16,978 |
def cmd(f):
"""Decorator to declare class method as a command"""
f.__command__ = True
return f | 3bdc82f0c83b0a4c0a0dd6a9629e7e2af489f0ae | 16,979 |
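Hypothetical usage: a dispatcher elsewhere is assumed to discover commands by checking getattr(method, "__command__", False).

class Shell:
    @cmd
    def greet(self):
        print("hello")

assert Shell.greet.__command__ is True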
def small_prior():
"""Give string format of small uniform distribution prior"""
return "uniform(0, 10)" | fb636b564b238e22262b906a8e0626a5dff305d1 | 16,980 |
from typing import List
from typing import Dict
from typing import OrderedDict
def retrieve_panelist_ranks(panelist_id: int,
database_connection: mysql.connector.connect
) -> List[Dict]:
"""Retrieve a list of show dates and the panelist rank for the
requested panelist ID"""
cursor = database_connection.cursor()
query = ("SELECT s.showid, s.showdate, pm.showpnlrank "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistid = %s "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"ORDER BY s.showdate ASC;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchall()
cursor.close()
if not result:
return None
ranks = []
for row in result:
info = OrderedDict()
info["show_id"] = row[0]
info["show_date"] = row[1].isoformat()
info["rank"] = row[2]
ranks.append(info)
return ranks | 0560a4f0d2c11f9dbd56d25c63d70fc29ed4292d | 16,981 |
def maxPixel(rpl):
"""maxPixel(rpl)
Computes the max pixel spectrum for the specified ripple/raw spectrum object."""
xs = epq.ExtremumSpectrum()
for r in xrange(0, rpl.getRows()):
dt2.StdOut.append(".")
if dt2.terminated:
break
for c in xrange(0, rpl.getColumns()):
rpl.setPosition(r, c)
xs.include(rpl)
return xs | 9cb40df8a02e7c861aebedb7d2e13c3fac04d024 | 16,982 |
def skip_device(name):
""" Decorator to mark a test to only run on certain devices
Takes single device name or list of names as argument
"""
def decorator(function):
name_list = name if type(name) == list else [name]
function.__dict__['skip_device'] = name_list
return function
return decorator | 1bacdce5396ada5e2ba7a8ca70a8dfb273016323 | 16,983 |
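Hypothetical usage; the device names are made up, and the surrounding test runner (not shown) is assumed to read the function's skip_device attribute.

@skip_device("emulator")
def test_camera_capture(device):
    ...

@skip_device(["devkit-a", "devkit-b"])
def test_gpu_throughput(device):
    ...

assert test_gpu_throughput.skip_device == ["devkit-a", "devkit-b"]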
def conv1d(inputs,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for 1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, no bias will
be applied.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
if context.in_eager_mode():
raise ValueError(
'Functional layers are currently not compatible with eager execution.'
'Use tf.layers.Conv1D instead.')
layer = Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs) | f3e9dc40d7da6a9bc7a55ec8b13c91a4ac8ba2c3 | 16,984 |
def _transform(ctx):
"""Implementation for the transform rule."""
if ctx.attr.command and not ctx.attr.transform:
fail(("Target '%s' specifies 'command = ...', but this attribute is ignored when no pattern" +
" is supplied with the 'transform' attribute") % (ctx.label.name))
lines = []
for transform in ctx.attr.transform:
lines.append("%s) transform;;" % to_glob(transform))
for include in ctx.attr.include:
lines.append("%s) include;;" % to_glob(include))
transformer = ctx.actions.declare_file("%s-transformer.sh" % (ctx.label.name))
ctx.actions.expand_template(template = ctx.file._transformer, output = transformer, substitutions = {
"{root}": ctx.bin_dir.path,
"{command}": ctx.attr.command,
"{patterns}": "\n".join(lines),
"{debug}": ["", "enabled"][int(ctx.attr.debug)],
}, is_executable = True)
outputs = []
opaths = []
for iattr in ctx.attr.inputs:
for ifile in iattr.files.to_list():
opath = ifile.short_path
info = ("FROM", iattr.label, "PATH", opath, "DIR", ifile.is_directory, "ORIGIN", ifile.short_path)
if not ifile.is_directory:
debug(ctx.attr.debug, "FILE", *info)
opaths.append((ifile, ifile.path, opath))
continue
if not ifile.short_path in ctx.attr.expand:
debug(ctx.attr.debug, "TREE-FILTER", *info)
add = ctx.actions.declare_directory(opath)
outputs.append(add)
ctx.actions.run(inputs = [ifile], outputs = [add], executable = transformer, arguments = [
ifile.path,
add.path,
], tools = ctx.files.tools)
continue
debug(ctx.attr.debug, "TREE-EXPAND", *info)
outputs = []
for output in ctx.attr.expand[ifile.short_path]:
if output.endswith("/"):
add = ctx.actions.declare_directory(output[:-1])
outputs.append(add)
                    ctx.actions.run(inputs = [ifile], outputs = [add], executable = transformer, arguments = [
ifile.path,
add.path, # ctx.bin_dir.path + "/" + ctx.label.package + "/" + opath
], tools = ctx.files.tools)
continue
opaths.append((ifile, ifile.path + "/" + output, ifile.short_path + "/" + output))
for ifile, ipath, opath in opaths:
debug(ctx.attr.debug, "GENERATING FILE", opath, "- FROM TREE?", ifile.is_directory, "- SOURCE PATH", ifile.short_path)
if matchany(opath, ctx.attr.transform, default = False):
ofile = ctx.actions.declare_file(opath)
outputs.append(ofile)
_run(ctx, ifile, ipath, ofile)
continue
if matchany(opath, ctx.attr.include):
ofile = ctx.actions.declare_file(opath)
outputs.append(ofile)
if not ifile.is_directory:
ctx.actions.symlink(output = ofile, target_file = ifile, progress_message = _message(ctx, ofile))
else:
ctx.actions.run(outputs = [ofile], inputs = [ifile], executable = "cp", arguments = ["-f", ipath, ofile.path])
continue
for o in outputs:
debug(ctx.attr.debug, "EXPECTING OUTPUT", o.short_path, "- TREE?", o.is_directory)
return [DefaultInfo(files = depset(outputs))] | 062e2f8ec7bfe751f0525e7279e5930b21409f28 | 16,985 |
from typing import List
from typing import Union
def check_constraints(
df: pd.DataFrame, schema: dict
) -> List[Union[ConstraintError, ConstraintTypeError]]:
"""
Check table field constraints.
Arguments:
df: Table.
schema: Table schema (https://specs.frictionlessdata.io/table-schema).
Returns:
A list of errors.
"""
errors = []
for field in schema.get("fields", []):
constraints = field.get("constraints", {})
result = check_field_constraints(df[field["name"]], **constraints, field=field)
if result:
errors += result
return errors | bd6569932b2eb6e4510b7a5e8e6b22e92ddaa1e5 | 16,986 |
from typing import Iterable
from typing import Tuple
from typing import Optional
from typing import Mapping
from typing import Any
import textwrap
def consolidate_fully(
inputs: Iterable[Tuple[core.Key, xarray.Dataset]],
*,
merge_kwargs: Optional[Mapping[str, Any]] = None,
combine_kwargs: Optional[Mapping[str, Any]] = None,
) -> Tuple[core.Key, xarray.Dataset]:
"""Consolidate chunks via merge/concat into a single (Key, Dataset) pair."""
concatenated_chunks = []
combined_offsets = {}
combined_vars = set()
for key, chunk in consolidate_chunks(inputs, combine_kwargs):
# We expect all chunks to be fully combined in all dimensions and all chunks
# to have the same offset (in each dimension). The chunks from
# consolidate_chunks() should already have this property but we explicitly
# check it here again in case consolidate_chunks changes.
for dim, offset in key.offsets.items():
if dim in combined_offsets and combined_offsets[dim] != offset:
raise ValueError('consolidating chunks fully failed because '
f'chunk\n{chunk}\n has offsets {key.offsets} '
f'that differ from {combined_offsets}')
combined_offsets[dim] = offset
concatenated_chunks.append(chunk)
combined_vars.update(chunk.keys())
# Merge variables, but unlike consolidate_variables, we merge all chunks and
# not just chunks per unique key.
kwargs = dict(
compat='equals',
join='exact',
combine_attrs='override',
)
if merge_kwargs is not None:
kwargs.update(merge_kwargs)
try:
dataset = xarray.merge(concatenated_chunks, **kwargs)
except (ValueError, xarray.MergeError) as original_error:
repr_string = '\n'.join(repr(ds) for ds in concatenated_chunks[:2])
if len(concatenated_chunks) > 2:
repr_string += '\n...'
repr_string = textwrap.indent(repr_string, prefix=' ')
raise ValueError(
f'merging dataset chunks with variables {combined_vars} failed.\n'
+ repr_string
) from original_error
return core.Key(combined_offsets, combined_vars), dataset | 240f2579f97ed1b2eaef2d4d7e9e35ed17dbacdf | 16,987 |
from typing import Iterator
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
def first(items: Iterator[T]) -> Optional[T]:
"""Return the first item of the iterator."""
return next(items, None) | 5571c8d1541ce2cb3f49da736f92e17fe6326e6d | 16,988 |
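Two small examples of the helper above:

assert first(iter([3, 1, 4])) == 3
assert first(iter([])) is None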
from typing import Sequence
def plot_precision_recall_curve(
precisions: Sequence[float], recalls: Sequence[float],
title: str = 'Precision/Recall curve'
) -> matplotlib.figure.Figure:
"""
Plots the precision recall curve given lists of (ordered) precision
and recall values.
Args:
precisions: list of float, precision for corresponding recall values,
should have same length as *recalls*.
recalls: list of float, recall for corresponding precision values,
should have same length as *precisions*.
title: str, plot title
Returns: matplotlib.figure.Figure, reference to the figure
"""
assert len(precisions) == len(recalls)
fig, ax = plt.subplots(1, 1, tight_layout=True)
ax.step(recalls, precisions, color='b', alpha=0.2, where='post')
ax.fill_between(recalls, precisions, alpha=0.2, color='b', step='post')
    ax.set(xlabel='Recall', ylabel='Precision', title=title)
    ax.set(xlim=(0.0, 1.05), ylim=(0.0, 1.05))
return fig | d71220b71dfe26aae949676105ba643e249f1c69 | 16,989 |
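Toy example with made-up precision/recall values; assumes matplotlib.pyplot is imported as plt in the module, as the snippet itself does.

fig = plot_precision_recall_curve(
    precisions=[1.0, 0.8, 0.6, 0.5],
    recalls=[0.0, 0.4, 0.7, 1.0],
)
fig.savefig("pr_curve.png")  # hypothetical output path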
import json
def Serialize(obj):
"""Return a binary serialized version of object.
Depending on the serialization method, some complex objects or input
formats may not be serializable.
UTF-8 strings (by themselves or in other structures e.g. lists) are always
supported.
Args:
obj: any object
Returns:
str, possibly containing ascii values >127
Raises:
    SerializeError: if an error occurred during serialization
"""
try:
return json.dumps(obj)
except TypeError as e:
raise SerializeError(e) | d9632f0104c69bfb38396f47f5813fd9a87d6361 | 16,990 |
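Example: UTF-8 strings and plain containers serialize cleanly, while a set raises SerializeError (that exception class is defined elsewhere in the original module).

payload = Serialize({"name": "sensor-1", "values": [1, 2, 3]})
try:
    Serialize({1, 2, 3})  # sets are not JSON-serializable
except SerializeError:
    pass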
from typing import Union
def getDragObject(parent: QWidget, item: Union['SourceListWidgetItem', 'DestTreeWidgetItem']) -> QDrag:
"""Instantiate QDrag of type application/draggerItem with corresponding QMimeData
Parameters
----------
parent: QWidget
item: Union['SourceListWidgetItem', 'DestTreeWidgetItem']
Returns
-------
QDrag
QDrag object holding item value as QMimeData
"""
# construct dataStream with item value
itemData = QByteArray()
getData(itemData, item.value)
mimeData = QMimeData()
mimeData.setData(LISTBOX_W_VALUE_MIMETYPE, itemData)
drag = QDrag(parent)
drag.setHotSpot(QPoint(0, 0))
drag.setMimeData(mimeData)
return drag | a6990e9f1a95632d25e15d993d0117cdb911cf7b | 16,991 |
import requests
from bs4 import BeautifulSoup
def get_soup(url):
"""
Makes a request to the given url and returns a BeautifulSoup instance of Soup
"""
res = requests.get(url)
if not res.content:
return None
soup = BeautifulSoup(res.content, "lxml")
return soup | bc4e79f4e2313e3c3edc6f6f123b6d13f71c0075 | 16,992 |
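Example (requires network access; the URL is illustrative):

soup = get_soup("https://example.com")
if soup is not None:
    print(soup.title.text)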
def _grow_segment(segment, addition):
"""Combine two segments into one, if possible."""
if _eq(segment[-1], addition[0]): # append addition
return segment + addition[1:]
elif _eq(segment[-1], addition[-1]): # append reversed addition
return segment + list(reversed(addition[:-1]))
elif _eq(segment[0], addition[-1]): # prepend addition
return addition[:-1] + segment
elif _eq(segment[0], addition[0]): # prepend reversed addition
return list(reversed(addition[1:])) + segment
else:
raise ValueError("addition doesn't fit segment") | 12f48ec2efbd74ac09f9277f2769a4b35030a425 | 16,993 |
def dsmoothlist_by_deform_exp(deform_exp, ag_mode):
"""
Automatically extract the selected artificial generations for training and validation set:
'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
'SingleOnly': ['single_frequency'],
'MixedOnly': ['mixed_frequency'],
'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
    Note that for the validation set we do not need to select all of them.
    :param deform_exp: name of the deformation experiment whose settings are loaded
    :param ag_mode: artificial generation mode: 'Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization'
    :return: (dsmoothlist_training, dsmoothlist_validation)
"""
if ag_mode not in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']:
raise ValueError("exp_mode should be in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']")
dsmoothlist_training = []
dsmoothlist_validation = []
deform_exp_setting = load_deform_exp_setting(deform_exp)
all_deform_methods = deform_exp_setting['DeformMethods']
comp_dict = {'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
'SingleOnly': ['single_frequency'],
'MixedOnly': ['mixed_frequency'],
'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
'Visualization': []
}
for i, deform_method in enumerate(all_deform_methods):
if deform_method in comp_dict[ag_mode]:
dsmoothlist_training.append(i)
if deform_exp in ['3D_max7_D14_K', '3D_max15_D14_K', '3D_max20_D14_K', '3D_max15_SingleFrequency_Visualization']:
if ag_mode == 'Resp':
dsmoothlist_validation = [0, 5, 10]
elif ag_mode == 'NoResp':
dsmoothlist_validation = [5, 8, 10]
elif ag_mode == 'SingleResp':
dsmoothlist_validation = [4, 8, 10]
elif ag_mode == 'SingleOnly':
dsmoothlist_validation = [5, 6, 8]
elif ag_mode == 'MixedOnly':
dsmoothlist_validation = [9, 10, 12]
else:
raise ValueError('dsmoothlist_validation not found for deform_exp='+deform_exp+', please add it manually')
return dsmoothlist_training, dsmoothlist_validation | 965ecf7373c313dccd290fb8a7c6c2075645a16a | 16,994 |
from typing import Callable
from typing import BinaryIO
from typing import Tuple
def get_data_reader(header: Header) -> Callable[[BinaryIO], Tuple]:
"""Make a binary reader function for data."""
names = get_data_names(header)
format_ = ""
for name in names:
if "CH" in name:
format_ += "h"
elif "Pulse" in name:
format_ += "L"
elif "Logic" in name:
format_ += "H"
elif "Alarm" in name:
format_ += "H"
elif "AlOut" in name:
format_ += "H"
elif "Status" in name:
format_ += "H"
else:
raise ValueError(name)
struct = Struct(BIG_ENDIAN + format_)
def reader(f: BinaryIO) -> Tuple:
return struct.unpack(f.read(struct.size))
return reader | c243d5d50ec8738f8f8673fd9bf40b9d26cad69b | 16,995 |
def test_api_mediawiki(monkeypatch):
"""The api_mediawiki test using mocks."""
result = "OpenClassrooms est une école en ligne..."
def mock_summary(*args, **kwargs):
return result
monkeypatch.setattr(
MediawikiApi, 'search', mock_summary)
wikipedia = MediawikiApi()
assert wikipedia.search('openclassrooms') == result | 28b22d4acf195dee3d1e7f10688610fea71fea3f | 16,996 |
from typing import Callable
def check_fnr(fnr: str, d_numbers=True, h_numbers=False, logger: Callable = lambda _x: None) -> bool:
"""
    Check if a number is a valid fødselsnummer.
Args:
fnr: A string containing the fodselsnummer to check
        h_numbers: True if h-numbers should be accepted (default: False)
        d_numbers: True if d-numbers should be accepted (default: True)
logger: A function used to log things
Returns:
True if it is a valid fodselsnummer, False otherwise.
"""
try:
return validate_fnr(fnr=fnr, d_numbers=d_numbers, h_numbers=h_numbers)
except ValueError as e:
logger(str(e))
return False | 2d5af194f1a69a093c6bf69b2cd42537d2dd32b0 | 16,997 |
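Hypothetical usage; the number below is made up (not a real fødselsnummer), and validate_fnr is defined elsewhere in the original module.

import logging

logging.basicConfig(level=logging.INFO)
ok = check_fnr("01010150000", logger=logging.getLogger("fnr").warning)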
import unittest
def get_case_list_from_cls(test_cls_list):
"""
    Convert test classes into test cases.
    :return: a list of test suites, one per test class
"""
test_list = []
for test_cls in test_cls_list:
test_cases = unittest.TestLoader().loadTestsFromTestCase(test_cls)
test_list.append(test_cases)
return test_list | 3f7ed0c7ed0b9110a9cb11579087712321ec868e | 16,998 |
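Example: build suites from a TestCase class and run them.

class SmokeTests(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)

suites = get_case_list_from_cls([SmokeTests])
unittest.TextTestRunner().run(unittest.TestSuite(suites))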
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
def align_times(sync_behavioral, sync_neural, score_thresh=0.9999,
ignore_poor_alignment=False, return_model=False, verbose=False):
"""Align times across different recording systems.
Parameters
----------
sync_behavioral : 1d array
Sync pulse times from behavioral computer.
sync_neural : 1d array
Sync pulse times from neural computer.
score_thresh : float, optional, default: 0.9999
R^2 threshold value to check that the fit model is better than.
ignore_poor_alignment : bool, optional, default: False
Whether to ignore a bad alignment score.
return_model : bool, optional, default: False
        Whether to return the model object. If False, the model intercept and coefficient are returned instead.
verbose : bool, optional, default: False
Whether to print out model information.
Returns
-------
model : LinearRegression
The fit model object. Only returned if `return_model` is True.
model_intercept : float
Intercept of the model predicting differences between sync pulses.
Returned if `return_model` is False.
model_coef : float
Learned coefficient of the model predicting differences between sync pulses.
Returned if `return_model` is False.
score : float
R^2 score of the model, indicating how good a fit there is between sync pulses.
"""
# sklearn imports are weird, so re-import here
# the sub-modules here aren't available from the global namespace
# Reshape to column arrays for scikit-learn
sync_behavioral = sync_behavioral.reshape(-1, 1)
sync_neural = sync_neural.reshape(-1, 1)
# Linear model to predict alignment between time traces
x_train, x_test, y_train, y_test = train_test_split(\
sync_behavioral, sync_neural, test_size=0.50, random_state=42)
model = LinearRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
score = r2_score(y_test, y_pred)
bad_score_msg = 'This session has bad synchronization between brain and behavior'
if score < score_thresh:
if not ignore_poor_alignment:
raise ValueError(bad_score_msg)
else:
print(bad_score_msg)
if verbose:
print('coef', model.coef_[0], '\n intercept', model.intercept_[0])
print('score', score)
if return_model:
return model, score
else:
return model.intercept_[0], model.coef_[0][0], score | 8bc8ad2a92267a0c1c5e8a4c6a71494910df8b7f | 16,999 |
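Synthetic check with made-up sync pulses: the "neural" clock runs 2x faster with a 5-second offset, so the fit should recover an intercept near 5, a coefficient near 2, and an R^2 near 1.

import numpy as np

behavioral = np.linspace(0.0, 100.0, 200)
neural = 2.0 * behavioral + 5.0
intercept, coef, score = align_times(behavioral, neural)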