content (stringlengths 35 to 762k) | sha1 (stringlengths 40 to 40) | id (int64 0 to 3.66M)
---|---|---|
import re
def remove_comments_from_json(string):
"""
Removes comments from a JSON string; supports // and /* */ styles. Adapted from Stack Overflow.
@param string: Original text.
@type string: str
@return: Text without comments.
@rtype: str
"""
pattern = r"((?<!\\)\".*?(?<!\\)\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*$)"
# first group captures quoted strings (double or single)
# second group captures comments (//single-line or /* multi-line */)
regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
def _replacer(match):
# if the 2nd group (capturing comments) is not None,
# it means we have captured a non-quoted (real) comment string.
if match.group(2) is not None:
return "" # so we will return empty to remove the comment
else: # otherwise, we will return the 1st group
return match.group(1) # captured quoted-string
return regex.sub(_replacer, string) | d5629e680bc1890458cbd40ea3f68d9d1629a8d0 | 5,100 |
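A minimal usage sketch for the function above, assuming it is in scope; the JSON text is made up for illustration:
import json

jsonc = """
{
    // a single-line comment
    "name": "demo", /* an inline block comment */
    "url": "http://example.com"  // trailing comment; the "//" inside the quoted URL is kept
}
"""
cleaned = remove_comments_from_json(jsonc)
print(json.loads(cleaned))  # {'name': 'demo', 'url': 'http://example.com'}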
import cv2 as cv
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
import torch.nn.functional as F
def visualize_depth(visual_dict, disp_or_error="disp", dataset="KITTI"):
"""visual_dict: "left_img": raw left image
"depth_error" or "disp": depth error or disparity map
"est_left_img": image reprojected from the right view
"depth": output depth
"photo_error": photometric error
All tensors should be normalized to [0, 1] before input, with
shape [C, H, W], and detached with .detach().
disp_or_error: output "disp"(arity) when used in training, or "error"
dataset: one of "KITTI", "CS"
"""
for k, v in visual_dict.items():
v = v.unsqueeze(0)
if dataset == "KITTI":
v = F.interpolate(v, [375, 1242], mode="bilinear",
align_corners=False)
elif dataset == "CS":
v = F.interpolate(v, [384, 1000], mode="bilinear",
align_corners=False)
v = v.cpu().squeeze(0).permute(1, 2, 0).numpy()
visual_dict[k] = v
left_img = visual_dict["left_img"] * 255
est_left_img = visual_dict["est_left_img"] * 255
if disp_or_error == "error":
error = visual_dict["depth_error"][..., 0]
normal_error = mpl.colors.Normalize(vmin=0,
vmax=1)
mapper_error = cm.ScalarMappable(norm=normal_error, cmap='coolwarm')
error = (mapper_error.to_rgba(error)[:, :, :3] * 255)
else:
error = visual_dict["disp"] * 255
error = cv.applyColorMap(error.astype(np.uint8),
cv.COLORMAP_OCEAN)
depth = visual_dict["depth"][..., 0]
disp = 1 / depth
vmin = np.percentile(disp, 5)
normal_disp = mpl.colors.Normalize(vmin=vmin, vmax=disp.max())
mapper_disp = cm.ScalarMappable(norm=normal_disp, cmap='magma')
depth_color = (mapper_disp.to_rgba(disp)[:, :, :3] * 255)
photo_error = visual_dict["photo_error"] * 255
photo_error = cv.applyColorMap(photo_error.astype(np.uint8), cv.COLORMAP_JET)
photo_error = cv.cvtColor(photo_error, cv.COLOR_RGB2BGR)
fused_img = (left_img + est_left_img)/2
photoerror_img = left_img + 0.5 * photo_error
photoerror_img = photoerror_img / np.max(photoerror_img)
photoerror_img *= 255
depth_img = left_img + 0.8 * depth_color
depth_img = depth_img / np.max(depth_img)
depth_img *= 255
img1 = np.vstack([left_img, est_left_img, depth_color, photo_error])
img2 = np.vstack([error, fused_img, depth_img, photoerror_img])
all_img = np.hstack([img1, img2]).astype(np.uint8)
all_img = cv.cvtColor(all_img, cv.COLOR_RGB2BGR)
return all_img | 0c1f9bc74e9ff4548e8e1c9f052e14d0c8bc8d4a | 5,101 |
def put_s3_object(bucket, key_name, local_file):
"""Upload a local file in the execution environment to S3
Parameters
----------
bucket: string, required
S3 bucket that will hold the attachment
key_name: string, required
S3 key that is the destination of the attachment
local_file: string, required
Location of the attachment to process
Returns
-------
boolean (True if successful, False if not successful)
"""
tracer.put_metadata('object', f's3://{bucket}/{key_name}')
try:
s3_resource.Bucket(bucket).upload_file(local_file, key_name)
result = True
tracer.put_annotation('ATTACHMENT_UPLOAD', 'SUCCESS')
except Exception as e:
logger.error(str(e))
tracer.put_annotation('ATTACHMENT_UPLOAD', 'FAILURE')
result = False
return result | d566b430541ec22c10e4a173bcd0a53e244ca252 | 5,102 |
import re
def parse_parionssport(url):
"""
Get ParionsSport odds from url
"""
if "parionssport" not in sb.TOKENS:
try:
token = get_parionssport_token()
sb.TOKENS["parionssport"] = token
except OpenSSL.crypto.Error:
return {}
if "paris-" in url.split("/")[-1] and "?" not in url:
sport = url.split("/")[-1].split("paris-")[-1]
return parse_sport_parionssport(sport)
regex = re.findall(r'\d+', url)
if regex:
id_league = regex[-1]
try:
return parse_parionssport_api("p" + str(id_league))
except TypeError:
return {}
return {} | 26cc749d0f951f3785ff322eeacd212bc55d1714 | 5,103 |
def get_index(channel_urls=(), prepend=True, platform=None,
use_local=False, use_cache=False, unknown=False, prefix=False):
"""
Return the index of packages available on the channels
If prepend=False, only the channels passed in as arguments are used.
If platform=None, then the current platform is used.
If prefix is supplied, then the packages installed in that prefix are added.
"""
if use_local:
channel_urls = ['local'] + list(channel_urls)
channel_urls = normalize_urls(channel_urls, platform)
if prepend:
channel_urls.extend(get_channel_urls(platform))
channel_urls = prioritize_channels(channel_urls)
index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
if prefix:
priorities = {c: p for c, p in itervalues(channel_urls)}
maxp = max(itervalues(priorities)) + 1 if priorities else 1
for dist, info in iteritems(install.linked_data(prefix)):
fn = info['fn']
schannel = info['schannel']
prefix = '' if schannel == 'defaults' else schannel + '::'
priority = priorities.get(schannel, maxp)
key = prefix + fn
if key in index:
# Copy the link information so the resolver knows this is installed
index[key] = index[key].copy()
index[key]['link'] = info.get('link') or True
else:
# only if the package is not in the repodata, use local
# conda-meta (with 'depends' defaulting to [])
info.setdefault('depends', [])
info['priority'] = priority
index[key] = info
return index | 176ac7147b6e32133ee07d9e302679294c9b24ce | 5,104 |
def is__invsign1(*args):
"""
is__invsign1(ea) -> bool
"""
return _ida_nalt.is__invsign1(*args) | ee13bc4cd76d134a65c4c71355d1a5449eb27fa4 | 5,105 |
def every(n_steps):
"""Returns True every n_steps, for use as *_at functions in various places."""
return lambda step: step % n_steps == 0 | 02fc6bc59fa6f223b681539baeae32c40bd9577e | 5,106 |
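A quick sanity check of the helper above (illustrative values):
log_every_100 = every(100)
print(log_every_100(0))    # True, since 0 % 100 == 0
print(log_every_100(250))  # False
print(log_every_100(300))  # True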
import scanpy as sc
def read_files(mousefile,humanfile):
""" Read into anndata objects and return """
mouse = sc.read_10x_h5(mousefile)
if humanfile is not None:
human = sc.read_10x_h5(humanfile)
else:
human = None
return mouse, human | fc6bc99160b22c0b4c8c9b48e9f99ee51ca61b62 | 5,107 |
def calc_batch_size(num_examples, batches_per_loop, batch_size):
"""Reduce the batch size if needed to cover all examples without a remainder."""
assert batch_size > 0
assert num_examples % batches_per_loop == 0
while num_examples % (batch_size * batches_per_loop) != 0:
batch_size -= 1
return batch_size | 3c394813a98a8414645f633a519001937247e8b0 | 5,108 |
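A small worked example of the batch-size reduction above, with made-up numbers:
print(calc_batch_size(1000, 10, 32))  # 25: the largest batch size <= 32 with 1000 % (25 * 10) == 0
print(calc_batch_size(1024, 8, 16))   # 16: already divides evenly, so it is returned unchanged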
def upload_volume(request, *args, **kwargs):
"""
User uploads volume data; the original data is deleted first.
"""
if not (request.user and request.user.is_authenticated()):
raise PermissionDenied()
user = request.user
assert 'pid' in kwargs
pid = kwargs['pid']
assert 'pk' in kwargs
id = kwargs['pk']
volume = Volume.objects.get(project__id=pid, id=id)
# Check whether the user is the member of this project
if not check_member_in_project(volume.project, user):
raise PermissionDenied(detail="User {} is not in project {}."
.format(user.username, volume.project.name))
if not request.FILES.get('file'):
raise ParseError(detail="There is no upload file.")
logger.info("User {} upload files to volume {}-{}.".format(
user.username, volume.project.name, volume.name))
filename = get_upload_volume_filename(volume, user)
save_upload_file_to_disk(request.FILES['file'], filename)
client = NFSLocalClient()
volume_dir = get_volume_direction_on_nfs(volume)
# Clear the dir first
client.removedir(volume_dir)
client.makedir(volume_dir)
client.copy_file_to_remote_and_untar(filename, volume_dir)
remove_file_from_disk(filename)
return JsonResponse({"detail": "success"}) | 8b2c7630473ca2f1aa309cb763fc018562115761 | 5,109 |
def has_admin_access(user):
"""Check if a user has admin access."""
return user == 'admin' | d178861bee504f6f3026c9e495d56cc8d2d7c3d3 | 5,110 |
def get_compss_type(value, depth=0):
# type: (object, int) -> int
""" Retrieve the value type mapped to COMPSs types.
:param value: Value to analyse.
:param depth: Collections depth.
:return: The Type of the value.
"""
# First check if it is a PSCO since a StorageNumpy can be detected
# as a numpy object.
if has_id(value):
# If has method getID maybe is a PSCO
try:
if get_id(value) not in [None, 'None']:
# the 'getID' + id == criteria for persistent object
return TYPE.EXTERNAL_PSCO
else:
return TYPE.OBJECT
except TypeError:
# A PSCO class has been used to check its type (when checking
# the return). Since we still don't know if it is going to be
# persistent inside, we assume that it is not. It will be checked
# later on the worker side when the task finishes.
return TYPE.OBJECT
# If it is a numpy scalar, we treat it like any other object to avoid
# inferring its type incorrectly. For instance, isinstance(np.float64 object, float)
# returns True
if np and isinstance(value, np.generic):
return TYPE.OBJECT
if isinstance(value, (bool, str, int, PYCOMPSS_LONG, float)):
value_type = type(value)
if value_type is bool:
return TYPE.BOOLEAN
elif value_type is str:
# Char does not exist as char, only strings.
# Files will be detected as string, since it is a path.
# The difference among them is defined by the parameter
# decoration as FILE.
return TYPE.STRING
elif value_type is int:
if IS_PYTHON3:
if value < PYTHON_MAX_INT: # noqa
return TYPE.INT
else:
return TYPE.LONG
else:
return TYPE.INT
elif value_type is PYCOMPSS_LONG:
return TYPE.LONG
elif value_type is float:
return TYPE.DOUBLE
elif depth > 0 and is_basic_iterable(value):
return TYPE.COLLECTION
elif depth > 0 and is_dict(value):
return TYPE.DICT_COLLECTION
else:
# Default type
return TYPE.OBJECT | c272cb6b2cdca159de08182dfac67b00b94b0d77 | 5,111 |
def set_namedtuple_defaults(namedtuple, default=None):
"""
Set *all* of the fields for a given namedtuple to a single default value.
Modifies the tuple in place, but returns it anyway.
More info:
https://stackoverflow.com/a/18348004
:param namedtuple: A constructed collections.namedtuple
:param default: The default value to set.
:return: the modified namedtuple
"""
namedtuple.__new__.__defaults__ = (default,) * len(namedtuple._fields)
return namedtuple | 1cade18cbdf5a4ae945ae246b94676572810d1e8 | 5,112 |
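A usage sketch for set_namedtuple_defaults, assuming the function above is in scope:
import collections

Point = collections.namedtuple("Point", ["x", "y", "z"])
set_namedtuple_defaults(Point, default=0)
print(Point())          # Point(x=0, y=0, z=0)
print(Point(x=1, y=2))  # Point(x=1, y=2, z=0)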
def test_tuple_get_item_merge():
"""Test composite function can be merged from pattern containing TupleGetItem nodes."""
pattern_table = [
("bn_relu", make_bn_relu_pattern())
]
def before():
x = relay.var('x', shape=(1, 8))
gamma = relay.var("gamma", shape=(8,))
beta = relay.var("beta", shape=(8,))
moving_mean = relay.var("moving_mean", shape=(8,))
moving_var = relay.var("moving_var", shape=(8,))
bn_node = relay.nn.batch_norm(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = bn_node[0]
r = relay.nn.relu(tuple_get_item_node)
return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
def expected():
x = relay.var('x', shape=(1, 8))
beta = relay.var("beta", shape=(8,))
gamma = relay.var("gamma", shape=(8,))
moving_mean = relay.var("moving_mean", shape=(8,))
moving_var = relay.var("moving_var", shape=(8,))
# bn_relu function
in_1 = relay.var('x1', shape=(1, 8))
in_2 = relay.var('gamma1', shape=(8,))
in_3 = relay.var('beta1', shape=(8,))
in_4 = relay.var('moving_mean1', shape=(8,))
in_5 = relay.var('moving_var1', shape=(8,))
bn_node = relay.nn.batch_norm(in_1, in_2, in_3, in_4, in_5)
tuple_get_item_node = bn_node[0]
relu_node = relay.nn.relu(tuple_get_item_node)
bn_relu = relay.Function([in_1, in_2, in_3, in_4, in_5], relu_node)
bn_relu = bn_relu.with_attr("Composite", "bn_relu")
bn_relu = bn_relu.with_attr("PartitionedFromPattern",
"nn.batch_norm_TupleGetItem0_nn.relu_")
# merged function
r = relay.Call(bn_relu, [x, gamma, beta, moving_mean, moving_var])
return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
check_result(pattern_table, before(), expected()) | 2a170843b643451268026d9f0f18901bce944597 | 5,113 |
def prepare_data_for_storage(major_version, minor_version, patch_version):
"""Prepares data to store to file.
"""
temp = Template(
u'''/*Copyright (c) 2016, Ford Motor Company\n'''
u'''All rights reserved.\n'''
u'''Redistribution and use in source and binary forms, with or without\n'''
u'''modification, are permitted provided that the following conditions are met:\n'''
u'''Redistributions of source code must retain the above copyright notice, this\n'''
u'''list of conditions and the following disclaimer.\n'''
u'''Redistributions in binary form must reproduce the above copyright notice,\n'''
u'''this list of conditions and the following\n'''
u'''disclaimer in the documentation and/or other materials provided with the\n'''
u'''distribution.\n'''
u'''Neither the name of the Ford Motor Company nor the names of its contributors\n'''
u'''may be used to endorse or promote products derived from this software\n'''
u'''without specific prior written permission.\n'''
u'''THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n'''
u'''AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n'''
u'''IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n'''
u'''ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n'''
u'''LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n'''
u'''CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n'''
u'''SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n'''
u'''INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n'''
u'''CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n'''
u'''ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n'''
u'''POSSIBILITY OF SUCH DAMAGE.\n'''
u'''*/\n'''
u'''#ifndef GENERATED_MSG_VERSION_H\n'''
u'''#define GENERATED_MSG_VERSION_H\n\n'''
u'''namespace application_manager {\n\n'''
u'''const uint16_t major_version = $m_version;\n'''
u'''const uint16_t minor_version = $min_version;\n'''
u'''const uint16_t patch_version = $p_version;\n'''
u'''} // namespace application_manager\n'''
u'''#endif // GENERATED_MSG_VERSION_H''')
data_to_file = temp.substitute(m_version = major_version, min_version = minor_version, p_version = patch_version)
return data_to_file | b3411398179472e0e4975c442bdc0dea2ecc1556 | 5,114 |
def flatten_mock_calls(mock):
"""
Flatten the calls performed on a particular mock object,
into a list of calls with arguments.
"""
result = []
for call in mock.mock_calls:
call = list(call)
call_name = call[0]
if '.' in str(call_name):
call_name = str(call_name).split('.')[-1]
result.append([call_name] + call[1:])
return result | 7c41025382f4ca25db1ccd328e9eb17e1d72a01a | 5,115 |
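An illustrative example of flatten_mock_calls with a throwaway unittest.mock object; the call names and arguments are made up:
from unittest import mock

m = mock.Mock()
m.connect("db://localhost")
m.session.execute("SELECT 1", timeout=5)
print(flatten_mock_calls(m))
# [['connect', ('db://localhost',), {}], ['execute', ('SELECT 1',), {'timeout': 5}]]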
from typing import Any
def clean_setting(
name: str,
default_value: object,
min_value: int = None,
max_value: int = None,
required_type: type = None,
choices: list = None,
) -> Any:
"""cleans the user input for an app's setting in the Django settings file
Will use default_value if setting is not defined.
Will use minimum or maximum value if respective boundary is exceeded.
Args:
default_value: value to use if setting is not defined
min_value: minimum allowed value (0 assumed for int)
max_value: maximum allowed value
required_type: Mandatory if `default_value` is `None`,
otherwise derived from default_value
Returns:
cleaned value for setting
This function is designed to be used in a dedicated module like ``app_settings.py``
as layer between the actual settings and all other modules.
``app_settings.py`` will import and clean all settings, and all other modules are supposed to import the settings from it.
Example for app_settings:
.. code-block:: python
from app_utils.django import clean_setting
EXAMPLE_SETTING = clean_setting("EXAMPLE_SETTING", 10)
"""
if default_value is None and not required_type:
raise ValueError("You must specify a required_type for None defaults")
if not required_type:
required_type = type(default_value)
if min_value is None and issubclass(required_type, int):
min_value = 0
if issubclass(required_type, int) and default_value is not None:
if min_value is not None and default_value < min_value:
raise ValueError("default_value can not be below min_value")
if max_value is not None and default_value > max_value:
raise ValueError("default_value can not be above max_value")
if not hasattr(settings, name):
cleaned_value = default_value
else:
dirty_value = getattr(settings, name)
if dirty_value is None or (
isinstance(dirty_value, required_type)
and (min_value is None or dirty_value >= min_value)
and (max_value is None or dirty_value <= max_value)
and (choices is None or dirty_value in choices)
):
cleaned_value = dirty_value
elif (
isinstance(dirty_value, required_type)
and min_value is not None
and dirty_value < min_value
):
logger.warn(
"You setting for {} it not valid. Please correct it. "
"Using minimum value for now: {}".format(name, min_value)
)
cleaned_value = min_value
elif (
isinstance(dirty_value, required_type)
and max_value is not None
and dirty_value > max_value
):
logger.warn(
"You setting for {} it not valid. Please correct it. "
"Using maximum value for now: {}".format(name, max_value)
)
cleaned_value = max_value
else:
logger.warn(
"You setting for {} it not valid. Please correct it. "
"Using default for now: {}".format(name, default_value)
)
cleaned_value = default_value
return cleaned_value | 91066dd26987ad04fc9ae9b8447e35fa64f8365d | 5,116 |
def update_not_existing_kwargs(to_update, update_from):
"""
This function updates the keyword arguments from update_from in
to_update, but only for keys that are not already set in to_update.
This is used for updating kwargs from the default dicts.
"""
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
return to_update | a66de151e6bc6d8f5b2f1b0ff32e30d2c8cb5277 | 5,117 |
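A short example of the kwargs-merging behaviour above, using made-up plotting defaults:
plot_defaults = {"color": "blue", "linewidth": 2}
user_kwargs = {"color": "red"}
print(update_not_existing_kwargs(user_kwargs, plot_defaults))
# {'color': 'red', 'linewidth': 2}  (the user value wins, missing keys are filled from the defaults)
print(update_not_existing_kwargs(None, plot_defaults))
# {'color': 'blue', 'linewidth': 2}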
import os
def get_reddit():
"""Returns the reddit dataset, downloading locally if necessary.
This dataset was released here:
https://www.reddit.com/r/redditdev/comments/dtg4j/want_to_help_reddit_build_a_recommender_a_public/
and contains 23M up/down votes from 44K users on 3.4M links.
Returns a CSR matrix of (item, user, rating)."""
filename = os.path.join(_download.LOCAL_CACHE_DIR, "reddit.hdf5")
if not os.path.isfile(filename):
log.info("Downloading dataset to '%s'", filename)
_download.download_file(URL, filename)
else:
log.info("Using cached dataset at '%s'", filename)
with h5py.File(filename, "r") as f:
m = f.get("item_user_ratings")
return csr_matrix((m.get("data"), m.get("indices"), m.get("indptr"))) | 710ff17f8802ff2e8ead248ec8c6ef6fbb8e08f5 | 5,118 |
def linear_forward(A, W, b):
"""Returns Z, (A, W, b)"""
Z = (W @ A) + b
cache = (A, W, b)
return Z, cache | 41d223473d2d8f084f13ca0f90f483b66e479a04 | 5,119 |
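A NumPy sketch of the linear forward step above; the shapes are arbitrary and only illustrate the W @ A + b contract:
import numpy as np

A = np.random.randn(3, 2)  # activations: 3 features, batch of 2
W = np.random.randn(4, 3)  # weights mapping 3 features to 4 units
b = np.random.randn(4, 1)  # bias, broadcast across the batch
Z, cache = linear_forward(A, W, b)
print(Z.shape)  # (4, 2)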
import contextlib
import wave
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (16000, 22050, 32000, 48000)
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate | 5148e788cb5f4bfe63b3e6f2cac24fe704fd9596 | 5,120 |
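A self-contained check of read_wave: write a one-second silent mono 16-bit 16 kHz file (so every assertion passes) and read it back. The file name is arbitrary:
import wave

with wave.open("silence.wav", "wb") as wf:
    wf.setnchannels(1)       # mono
    wf.setsampwidth(2)       # 16-bit samples
    wf.setframerate(16000)
    wf.writeframes(b"\x00\x00" * 16000)  # one second of silence

pcm_data, sample_rate = read_wave("silence.wav")
print(sample_rate, len(pcm_data))  # 16000 32000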
def update_rho_hat(rho_hat_q, rho_hat_g, phi_hat, K, Q, Y_tp1, gamma_t, W):
"""
rho_hat is an intermediate quantity
rho_hat_{n, nu, theta}(x) = 1/n E[ sum_{t=1}^n s(X_{t-1}, X_t, Y_t) | Y_{0:n}, X_n=x ]
where s() are the sufficient statistics
see Cappe (2.5)
In our case (discrete emissions HMM), it can be broken down into two separable components:
rho_hat_q_{n, nu, theta}(i,j,k; theta) = 1/n E[ sum_{t=1}^n I_{X_{t-1}=i, X_t=j} | Y_{0:n}, X_n=k ]
rho_hat_g_{n, nu, theta}(i,k; theta) = 1/n E[ sum_{t=0}^n I_{X_t=i} s(Y_t) | Y_{0:n}, X_n=k ]
where s() here is just a multinoulli vector with W entries, so we can re-express it as
rho_hat_g_{n, nu, theta}(i,w,k; theta) = 1/n E[ sum_{t=0}^n I_{X_t=i, Y_t=w} | Y_{0:n}, X_n=k ]
rho_hat_q has KxKxK entries
rho_hat_g has KxWxK entries
"""
rho_hat_q = update_rho_hat_q(rho_hat_q, phi_hat, Q, gamma_t, K)
rho_hat_g = update_rho_hat_g(rho_hat_g, Y_tp1, phi_hat, Q, gamma_t, K, W)
return rho_hat_q, rho_hat_g | 55713f9456ad3e8a5a1bf2fadf58e0befddf717a | 5,121 |
import itertools
import numpy
def obtain_dihedral_angles(system_coords, bond_distance):
"""
system_coords: coords for 1 frame
"""
ref_selection = system_coords[0]
# Process bonds for reference frame (first)
bonds = []
sq_bond_distance = bond_distance**2
for i in range(len(ref_selection)-1):
for j in range(i+1, len(ref_selection)):
if mathTools.sq_distance(ref_selection[i], ref_selection[j]) <= sq_bond_distance:
bonds.append(tuple(sorted([i, j])))
print "DBG: Found %d bonds"%(len(bonds))
# Find angles
angles = []
for i in range(len(bonds)-1):
for j in range(i+1, len(bonds)):
if bonds_are_linked(bonds[i], bonds[j]):
angles.append(tuple(sorted([bonds[i], bonds[j]])))
print "DBG: Found %d angles"%(len(angles))
# Finally, find dihedrals
dihedrals = []
for i in range(len(angles)-1):
for j in range(i+1, len(angles)):
if angles_share_bond(angles[i], angles[j]):
dihedrals.append(tuple(sorted([angles[i], angles[j]])))
print "DBG: Found %d dihedrals"%(len(dihedrals))
# Now reorganize atoms in dihedrals so that
# they are consecutive and we can calculate the
# actual dihedral angle
r_dihedrals = []
for dihedral in dihedrals:
indices = get_dihedral_indices(dihedral)
# Get permutation of minimum distance
distances = []
for perm in itertools.permutations(indices):
#print dihedral, perm
distances.append(( mathTools.sq_distance(ref_selection[perm[0]],ref_selection[perm[1]])+
mathTools.sq_distance(ref_selection[perm[1]],ref_selection[perm[2]])+
mathTools.sq_distance(ref_selection[perm[2]],ref_selection[perm[3]]),
perm))
# We will pick the one which summed distances is smaller
distances.sort()
r_dihedrals.append(distances[0][1])
all_angles = []
for ref in system_coords:
#Calculate the angles for a ref
angles = []
for dihedral_indexes in r_dihedrals:
atom1 = ref[dihedral_indexes[0]]
atom2 = ref[dihedral_indexes[1]]
atom3 = ref[dihedral_indexes[2]]
atom4 = ref[dihedral_indexes[3]]
angles.append( mathTools.calc_dihedral(atom1, atom2, atom3, atom4))
all_angles.append(angles)
return numpy.array(all_angles) | 7aba964f81c550e6d6204d28327d65020e7372b0 | 5,122 |
def piecewise_accel(duration,initial,final):
"""Defines a piecewise acceleration.
Args:
duration (float): Length of time for the acceleration to complete.
initial (float): Initial value.
final (float): Final value.
"""
a = (final-initial)
return lambda t: initial + a * (
(9./2 * t**3/duration**3) * (t<duration/3)
+ (-9*t**3/duration**3 + 27./2*t**2/duration**2 - 9./2*t/duration + 1./2) * (t<2*duration/3)*(t>=duration/3)
+ (9./2*t**3/duration**3 - 27./2 * t**2/duration**2 + 27./2*t/duration - 7./2) * (t>= 2*duration/3)) | 7f6acd7ba2610a2e56cc1f0758b3a39543bfe8c2 | 5,123 |
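Evaluating the returned lambda shows the smooth S-shaped ramp; duration, initial and final are made-up values and outputs are rounded:
ramp = piecewise_accel(duration=1.0, initial=0.0, final=10.0)
for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, round(ramp(t), 3))
# 0.0 0.0, 0.25 0.703, 0.5 5.0, 0.75 9.297, 1.0 10.0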
def get_displayed_views(id):
"""
get views in window rect by view id str
:param id:
:return:
"""
return get_solo().get_displayed_views(id) | f3058f78ae1a2d70a3771a52cc852f1119a51f6a | 5,124 |
def get_build_version(xform):
"""
there are a bunch of unreliable places to look for a build version;
this abstracts that out
"""
version = get_version_from_build_id(xform.domain, xform.build_id)
if version:
return version, BuildVersionSource.BUILD_ID
version = get_version_from_appversion_text(
get_meta_appversion_text(xform)
)
if version:
return version, BuildVersionSource.APPVERSION_TEXT
xform_version = xform.version
if xform_version and xform_version != '1':
return int(xform_version), BuildVersionSource.XFORM_VERSION
return None, BuildVersionSource.NONE | 417debd5d3daf10c28222d42e6cc90869f5779ec | 5,125 |
import typing
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
"""
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
:param routing_table: a TreeRoutingTable
:param key: a 48 byte hash
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
"""
if len(key) != constants.HASH_LENGTH:
raise ValueError("invalid key length: %i" % len(key))
return shortlist or routing_table.find_close_peers(key) | 884e4444cca22eaf9495dad8ff28bfc601b4c778 | 5,126 |
from typing import Dict
from typing import Any
def get_merged_contextvars(bound_logger: BindableLogger) -> Dict[str, Any]:
"""
Return a copy of the current context-local context merged with the context
from *bound_logger*.
.. versionadded:: 21.2.0
"""
ctx = get_contextvars()
ctx.update(structlog.get_context(bound_logger))
return ctx | 3ee59f57ee10c4f57c4085e660cc054830688416 | 5,127 |
import warnings
def policy_iteration(policy, env, value_function=None, threshold=0.00001, max_steps=1000, **kwargs):
"""
Policy iteration algorithm, which consists of iterative policy evaluation until convergence for the current policy
(estimating over repeated sweeps until the estimate stops improving), and then finally updates the policy to be greedy.
"""
value_function = last_converged_v_fun = np.zeros(env.world.size) if value_function is None else value_function
greedy_policy = policy
for step_number in range(max_steps):
new_value_function = utils.single_step_policy_evaluation(greedy_policy, env, value_function=value_function, **kwargs)
delta_eval = np.max(value_function - new_value_function)
value_function = new_value_function
if delta_eval < threshold: # policy evaluation converged
new_policy = utils.greedy_policy_from_value_function(greedy_policy, env, value_function=value_function, **kwargs)
delta = np.max(last_converged_v_fun - new_value_function)
last_converged_v_fun = new_value_function
if delta < threshold: # last converged value functions difference converged
break
else:
greedy_policy = new_policy
elif step_number == max_steps - 1:
greedy_policy = utils.greedy_policy_from_value_function(greedy_policy, env, value_function=last_converged_v_fun,
**kwargs)
warning_message = 'Policy iteration did not reach the selected threshold. Finished after reaching ' \
'the maximum {} steps with delta_eval {}'.format(step_number + 1, delta_eval)
warnings.warn(warning_message, UserWarning)
return last_converged_v_fun, greedy_policy | 090fc3a4e87986afc9dfd3565a2d234c7d2e8005 | 5,128 |
import os
import logging
import math
import shutil
def main(pdb_complex_core, pdb_fragment, pdb_atom_core_name, pdb_atom_fragment_name, steps, core_chain="L",
fragment_chain="L", output_file_to_tmpl="growing_result.pdb", output_file_to_grow="initialization_grow.pdb",
h_core = None, h_frag = None, rename=False, threshold_clash=1.70):
"""
From a core (protein + ligand core = core_chain) and fragment (fragment_chain) pdb files, given the heavy atom
names that we want to connect, this function adds the fragment to the core structure. We will get three PDB files:
(1) the ligand core of the complex isolated, that will be used in further steps to generate the template of the
initial structure; (2) the ligand completed with the core and the fragment added, also prepared to generate the
template of the final structure; (3) the pdb file that will be used to initialise PELE simulations. Here we have
the core structure with the fragment added, but this fragment has been size-reduced in order to get small bond
lengths between its atoms. (During PELE simulations this distance will increase linearly until it reaches the
bond length given by the template of the final structure)
:param pdb_complex_core: pdb file with a complex (protein + ligand) that will be used as core to perform the
addition of the fragment. The chain of the ligand needs to be named as "L". We will also use the information of the
protein to perform calculations of contacts with the ligand.
:param pdb_fragment: pdb file, normally with only the ligand (please, put "L" as name of the chain that contain the
ligand), that will be added to the core.
:param pdb_atom_core_name: heavy atom name (string) of the ligand core where we want to add the fragment and
form a new bond.
:param pdb_atom_fragment_name: heavy atom name (string) of the ligand fragment where we want to perform the
connection to form a new bond with the core.
:param core_chain: name of the chain which contains the ligand in the pdb of the core complex. string
:param fragment_chain: name of the chain which contains the ligand in the pdb of the fragment. string
:param output_file_to_tmpl: name of the pdb file with the result of the connection between the core and the fragment
(single ligand). string. The resname of the molecule will be "GRW" and the resnum "1". "growing_result.pdb" by
default.
:param output_file_to_grow: name of the pdb file that will be used to initialise PELE simulations. string.
"initialization_grow.pdb" by default.
:param h_core: if the user wants to select a specific hydrogen atom of the core to create the new bond, its name
must be specified here.
:param h_frag: if the user wants to select a specific hydrogen atom of the fragment to create the new bond, its name
must be specified here.
:param rename: if set, the names of the pdb atom names will be replaced with "G+atom_number_fragment".
:param threshold_clash: distance that will be used to identify which atoms are clashing between the
fragment and the core.
:returns: [changing_names_dictionary, hydrogen_atoms, "{}.pdb".format(core_residue_name), output_file_to_tmpl,
output_file_to_grow, core_original_atom, fragment_original_atom]
"""
if not os.path.exists(c.PRE_WORKING_DIR):
os.mkdir(c.PRE_WORKING_DIR)
# Check that ligand names are not repeated
check_and_fix_repeated_lignames(pdb_complex_core, pdb_fragment, core_chain, fragment_chain)
for pdb_file in (pdb_complex_core, pdb_fragment):
logging.info("Checking {} ...".format(pdb_file))
checker.check_and_fix_pdbatomnames(pdb_file)
# Get the selected chain from the core and the fragment and convert them into ProDy molecules.
ligand_core = complex_to_prody.pdb_parser_ligand(pdb_complex_core, core_chain)
fragment = complex_to_prody.pdb_parser_ligand(pdb_fragment, fragment_chain)
# We will check that the structures are protonated. We will also create a new PDB file for each one and we will get
# the residue name of each ligand.
core_residue_name = extract_heteroatoms_pdbs(pdb_complex_core, True, core_chain, output_folder=c.PRE_WORKING_DIR)
frag_residue_name = extract_heteroatoms_pdbs(pdb_fragment, True, fragment_chain, output_folder=c.PRE_WORKING_DIR)
# We will use the PDBs previously generated to get a list of Bio.PDB.Atoms for each structure
bioatoms_core_and_frag = from_pdb_to_bioatomlist([os.path.join(c.PRE_WORKING_DIR, core_residue_name),
os.path.join(c.PRE_WORKING_DIR, frag_residue_name)])
# Then, we will have to transform the atom names of the core and the fragment to a list object
# (format required by functions)
pdb_atom_names = [pdb_atom_core_name, pdb_atom_fragment_name]
# Using the Bio.PDB.Atoms lists and this names we will get the heavy atoms that we will use later to do the bonding
heavy_atoms = extract_heavy_atoms(pdb_atom_names, bioatoms_core_and_frag)
# Once we have the heavy atoms, for each structure we will obtain the hydrogens bonded to each heavy atom.
# We will need pdbs because we will use the information of the protein to select the hydrogens properly.
hydrogen_atoms = extract_hydrogens(pdb_atom_names, bioatoms_core_and_frag, [pdb_complex_core, pdb_fragment], h_core,
h_frag, core_chain, fragment_chain)
# Create a list with the atoms that form a bond in core and fragment.
core_bond = [heavy_atoms[0], hydrogen_atoms[0]]
fragment_bond = [hydrogen_atoms[1], heavy_atoms[1]] # This has to be in inverted order to do correctly the superimposition
logger.info("Performing a superimposition of bond {} of the fragment on bond {} of the core..."
.format(fragment_bond, core_bond))
# Using the previous information we will superimpose the whole fragment on the bond of the core in order to place
# the fragment in the correct position, deleting the H.
merged_structure, core_original_atom, fragment_original_atom = join_structures(core_bond, fragment_bond,
ligand_core, fragment,
pdb_complex_core, pdb_fragment,
core_chain, fragment_chain)
# It is possible to create intramolecular clashes after placing the fragment on the bond of the core, so we will
# check if this is happening, and if it is, we will perform rotations of 10º until the clash is avoided.
check_results = check_collision(merged_structure=merged_structure[0], bond=heavy_atoms, theta=0,
theta_interval=math.pi/18, core_bond=core_bond, list_of_atoms=bioatoms_core_and_frag[1],
fragment_bond=fragment_bond, core_structure=ligand_core, fragment_structure=fragment,
pdb_complex=pdb_complex_core, pdb_fragment=pdb_fragment, chain_complex=core_chain,
chain_fragment=fragment_chain, threshold_clash=threshold_clash)
# If we do not find a solution in the previous step, we will repeat the rotations applying only increments of 1º
if not check_results:
check_results = check_collision(merged_structure=merged_structure[0], bond=heavy_atoms, theta=0,
theta_interval=math.pi/180, core_bond=core_bond, list_of_atoms=bioatoms_core_and_frag[1],
fragment_bond=fragment_bond, core_structure=ligand_core, fragment_structure=fragment,
pdb_complex=pdb_complex_core, pdb_fragment=pdb_fragment, chain_complex=core_chain,
chain_fragment=fragment_chain, threshold_clash=threshold_clash)
# Now, we want to extract this structure to a PDB to create the template file after the growing. We will make a copy
# of the structure because then we will need to resize the fragment part, so we need to keep it as two different
# residues.
try:
structure_to_template = check_results.copy()
except AttributeError:
raise AttributeError("Frag cannot superimpose the fragment onto the core's hydrogen. \
In order to create space for the fragment \
manually rotate the hydrogen bond of the core where the fragment will be attached to. \
We are currently working to fix this automatically")
# Once we have all the atom names unique, we will rename the resname and the resnum of both, core and fragment, to
# GRW and 1. Doing this, the molecule composed by two parts will be transformed into a single one.
changing_names = pdb_joiner.extract_and_change_atomnames(structure_to_template, fragment.getResnames()[0],
core_residue_name, rename=rename)
molecule_names_changed, changing_names_dictionary = changing_names
# Check if there is still overlapping names
if pdb_joiner.check_overlapping_names(molecule_names_changed):
logger.critical("{} is repeated in the fragment and the core. Please, change this atom name of the core by"
" another one.".format(pdb_joiner.check_overlapping_names(molecule_names_changed)))
logger.info("The following names of the fragment have been changed:")
for transformation in changing_names_dictionary:
logger.info("{} --> {}".format(transformation, changing_names_dictionary[transformation]))
finishing_joining(molecule_names_changed, core_chain)
# Extract a PDB file to do the templates
prody.writePDB(os.path.join(c.PRE_WORKING_DIR, output_file_to_tmpl), molecule_names_changed)
logger.info("The result of core + fragment has been saved in '{}'. This will be used to create the template file."
.format(output_file_to_tmpl))
# Now, we will use the original molecule to do the resizing of the fragment.
reduce_molecule_size(check_results, frag_residue_name, steps)
point_reference = check_results.select("name {} and resname {}".format(pdb_atom_fragment_name, frag_residue_name))
fragment_segment = check_results.select("resname {}".format(frag_residue_name))
translate_to_position(hydrogen_atoms[0].get_coord(), point_reference.getCoords(), fragment_segment)
# Repeat all the preparation process to finish the writing of the molecule.
changing_names = pdb_joiner.extract_and_change_atomnames(check_results, fragment.getResnames()[0], core_residue_name, rename=rename)
molecule_names_changed, changing_names_dictionary = changing_names
finishing_joining(molecule_names_changed, core_chain)
logger.info("The result of core + fragment(small) has been saved in '{}'. This will be used to initialise the growing."
.format(output_file_to_grow))
# Add the protein to the ligand
output_ligand_grown_path = os.path.join(c.PRE_WORKING_DIR, "ligand_grown.pdb")
prody.writePDB(output_ligand_grown_path, molecule_names_changed)
with open(output_ligand_grown_path) as lig:
content_lig = lig.readlines()
content_lig = content_lig[1:]
content_lig = "".join(content_lig)
# Join all parts of the PDB
output_file = []
chain_not_lig = get_everything_except_ligand(pdb_complex_core, core_chain)
output_file.append(chain_not_lig)
output_file.append("{}TER".format(content_lig))
out_joined = "".join(output_file)
with open(os.path.join(c.PRE_WORKING_DIR, output_file_to_grow), "w") as output: # Save the file in the pregrow folder
output.write(out_joined)
# Make a copy of output files in the main directory
shutil.copy(os.path.join(c.PRE_WORKING_DIR, output_file_to_grow), ".") # We assume that the user will be running FrAG in PELE's main folder...
# In further steps we will probably need to recover the names of the atoms for the fragment, so for this reason we
# are returning this dictionary in the function.
return changing_names_dictionary, hydrogen_atoms, "{}.pdb".format(core_residue_name), output_file_to_tmpl, \
output_file_to_grow, core_original_atom, fragment_original_atom | a88f304b9e86c23dd5c39a61f29b19d0b02526bc | 5,129 |
def create_blueprint(app):
"""Register blueprint routes on app."""
blueprint = Blueprint(
"invenio_records_marc21",
__name__,
template_folder="../templates",
url_prefix="/marc21",
)
blueprint = init_theme_views(blueprint, app)
blueprint = init_records_views(blueprint, app)
return blueprint | 8aa53185d3d41e4e5aabfa2efaa6a73b94dc02f5 | 5,130 |
import json
def mock_machine():
"""Fixture localapi Machine init with the data/response.json file."""
with requests_mock.Mocker() as mock_resp:
f = open(response_test_path)
data = json.load(f)
machine_ipaddr = "0.0.0.0"
mock_addr = f"http://{machine_ipaddr}:3000/api/v1/hvac"
mock_resp.post(mock_addr, json=data)
return Machine(machine_ipaddr) | 726aecd3195d39f8a0c48d93a00299a5d61ac90a | 5,131 |
import re
def get_files_links(service, v):
"""Print links of uploaded files.
:param: service (object): Google Drive service object.
:param: v (string): Version of Tor Browser to look for.
"""
windows_re = r'torbrowser-install-%s_\w\w(-\w\w)?\.exe(\.asc)?' % v
linux_re = r'tor-browser-linux\d\d-%s_(\w\w)(-\w\w)?\.tar\.xz(\.asc)?' % v
osx_re = r'TorBrowser-%s-osx\d\d_(\w\w)(-\w\w)?\.dmg(\.asc)?' % v
# dictionary to store file names and IDs
files_dict = dict()
print "Trying to fetch links of uploaded files..."
links = service.files().list().execute()
items = links.get('items', [])
if not items:
raise ValueError('No files found.')
else:
for item in items:
if re.search(windows_re, item['title']):
files_dict[item['title']] = item['id']
elif re.search(linux_re, item['title']):
files_dict[item['title']] = item['id']
elif re.search(osx_re, item['title']):
files_dict[item['title']] = item['id']
return files_dict | bda4af382bb629ce40721ccff64553cd2b98d558 | 5,132 |
def list_(context, field, mpd_query=None):
"""
*musicpd.org, music database section:*
``list {TYPE} [ARTIST]``
Lists all tags of the specified type. ``TYPE`` should be ``album``,
``artist``, ``date``, or ``genre``.
``ARTIST`` is an optional parameter when type is ``album``,
``date``, or ``genre``.
This filters the result list by an artist.
*Clarifications:*
The musicpd.org documentation for ``list`` is far from complete. The
command also supports the following variant:
``list {TYPE} {QUERY}``
Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs
of a field name and a value. If the ``QUERY`` consists of more than one
pair, the pairs are AND-ed together to find the result. Examples of
valid queries and what they should return:
``list "artist" "artist" "ABBA"``
List artists where the artist name is "ABBA". Response::
Artist: ABBA
OK
``list "album" "artist" "ABBA"``
Lists albums where the artist name is "ABBA". Response::
Album: More ABBA Gold: More ABBA Hits
Album: Absolute More Christmas
Album: Gold: Greatest Hits
OK
``list "artist" "album" "Gold: Greatest Hits"``
Lists artists where the album name is "Gold: Greatest Hits".
Response::
Artist: ABBA
OK
``list "artist" "artist" "ABBA" "artist" "TLC"``
Lists artists where the artist name is "ABBA" *and* "TLC". Should
never match anything. Response::
OK
``list "date" "artist" "ABBA"``
Lists dates where artist name is "ABBA". Response::
Date:
Date: 1992
Date: 1993
OK
``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``
Lists dates where artist name is "ABBA" and album name is "Gold:
Greatest Hits". Response::
Date: 1992
OK
``list "genre" "artist" "The Rolling Stones"``
Lists genres where artist name is "The Rolling Stones". Response::
Genre:
Genre: Rock
OK
*GMPC:*
- does not add quotes around the field argument.
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the field argument.
"""
field = field.lower()
query = _list_build_query(field, mpd_query)
if field == u'artist':
return _list_artist(context, query)
elif field == u'album':
return _list_album(context, query)
elif field == u'date':
return _list_date(context, query)
elif field == u'genre':
pass | b95b6e4e5be01a1796d1708fc214821ce4f78491 | 5,133 |
def palindrome(d: int)-> str:
"""
Function gets the digits of the number, left-shifts the running result by multiplying it by 10
at each iteration, and adds the current digit.
Input: Integer
Output: String (Sentence telling if the number is palindrome or not)
"""
remainder = 0
revnum = 0
n = len(str(d))
copynum2 = d
while copynum2 != 0:
remainder = copynum2%10
revnum = revnum * 10 + remainder
copynum2 //= 10
if d == revnum:
return "Given Numer {} is palindrome".format(d)
else:
return "Given Numer {} is not palindrome".format(d) | fe654ab92a905e265987856bcd2106c7b082b490 | 5,134 |
import json
def import_from_file(request):
"""
Import a part of a source site's page tree via an import of a JSON file
exported to a user's filesystem from the source site's Wagtail Admin.
The source site's base url and the source page id of the point in the
tree to import define what to import, and the destination parent page
defines where to import it to.
"""
if request.method == 'POST':
form = ImportFromFileForm(request.POST, request.FILES)
if form.is_valid():
import_data = json.loads(form.cleaned_data['file'].read().decode('utf-8-sig'))
parent_page = form.cleaned_data['parent_page']
try:
page_count = import_pages(import_data, parent_page)
except LookupError as e:
messages.error(request, _(
"Import failed: %(reason)s") % {'reason': e}
)
else:
messages.success(request, ungettext(
"%(count)s page imported.",
"%(count)s pages imported.",
page_count) % {'count': page_count}
)
return redirect('wagtailadmin_explore', parent_page.pk)
else:
form = ImportFromFileForm()
return render(request, 'wagtailimportexport/import_from_file.html', {
'form': form,
}) | 0dd6d4f2499a05c13002a0c410a8558b8f5b3b29 | 5,135 |
import os
def teqc_version():
"""
return string with location of the teqc executable
author: kristine larson
"""
exedir = os.environ['EXE']
gpse = exedir + '/teqc'
# heroku version should be in the main area
if not os.path.exists(gpse):
gpse = './teqc'
return gpse | 1b4ea8473dac722a69341afcf30c8e465dde9302 | 5,136 |
def graph_search(problem, verbose=False, debug=False):
"""graph_search(problem, verbose, debug) - Given a problem representation
attempt to solve the problem.
Returns a tuple (path, nodes_explored) where:
path - list of actions to solve the problem or None if no solution was found
nodes_explored - Number of nodes explored (dequeued from frontier)
"""
frontier = PriorityQueue()
root = Node(problem, problem.initial)
frontier.append(root)
node = frontier.pop()
pop = True # pop from the right for DFS and A*; BFS pops from the left
if node.expand(node.problem)[0].g < 0:
# DFS which has the negative depth
# since start from the deepest node
frontier = deque()
frontier.append(root)
elif node.expand(node.problem)[0].h == 2:
# BFS
pop = False
frontier = deque()
frontier.append(root)
else:
# Manhattan
frontier.append(node)
DONE = False
nodes_explored = 0
explored_set = Explored()
while not DONE:
if pop:
node = frontier.pop() # DFS A*
else:
node = frontier.popleft() # BFS
if debug:
print("Next decision is:", str(node))
explored_set.add(node.state.state_tuple())
nodes_explored += 1
if problem.goal_test(node.state):
solved_path = node.path()
if debug:
print("Puzzle solved")
# print("path:", str(node.path()))
DONE = True
# if Verbose True display the info stats in requirement
if verbose:
print("Solution in %d moves" % (len(solved_path) - 1))
print("Initial State")
print(solved_path[0])
for i in range(1, len(solved_path)):
print("Move %d - %s" % (i, solved_path[i].action))
print(solved_path[i].state)
return solved_path, nodes_explored
# Not solved yet
else:
for child in node.expand(node.problem):
# add new child to frontier set
if not explored_set.exists(child.state.state_tuple()):
frontier.append(child)
explored_set.add(child)
# finish when there is no node in the queue
# if debug:
# print("Num node in quenue:", str(len(frontier)))
DONE = len(frontier) == 0
if verbose:
print("No solution found")
return None, nodes_explored | f3046ba138b926390308ada2165fb7eef1fbc92a | 5,137 |
def _build_groupby_indices(df, table_name, join_columns):
"""
Pre-computes indexes based on the group-by columns.
Returns a dictionary of tuples to the list of indices.
"""
log.info("Grouping table '{}' by: {}.".format(table_name,
", ".join(join_columns)))
ret = df.groupby(join_columns).indices
if len(join_columns) == 1:
# Manually patch the dictionary to make sure its keys are tuples.
ret = {(k,): v for k, v in ret.items()}
return ret | 16ba9cd231aac2560a5735dc4727dd5c15b90fc2 | 5,138 |
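A pandas sketch of the grouping helper above. The original module presumably defines a module-level log, so a plain logger is created here to let the snippet run standalone; the table name and data are made up:
import logging
import pandas as pd

log = logging.getLogger(__name__)

df = pd.DataFrame({"country": ["US", "FR", "US"], "value": [1, 2, 3]})
indices = _build_groupby_indices(df, "sales", ["country"])
print(indices)
# {('FR',): array([1]), ('US',): array([0, 2])}  (note the tuple keys even for a single join column)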
from typing import List
def add_multiple_package(package_list: List[str]) -> str:
"""
Generate latex code to add multiple package to preamble
:param package_list: List of package to add in preamble
"""
usepackage_command_list = []
for package in package_list:
usepackage_command_list.append(rf"""\usepackage{{{package}}}""")
return "\n".join(usepackage_command_list) | 90bdd0a521c094d92c35ef92e62d6b43f6b135b4 | 5,139 |
from metrics.models import Group
def emailAdmins(msgData):
"""
Emails all admins with given message. States which admin created/is sending the message to everyone.
Return: {bool}
"""
try:
if not msgData['msg']:
print('No message was provided to send.')
return False
admins = list(Group.objects.get(name='admins').user_set.all().values_list('username', flat=True))
returnMessage, emailSent = sendEmail({
'subject': '[Omnia] Admins communication',
'recipients': admins,
'fromEmail': msgData['fromEmail'],
'message': f'<div style="font-family:sans-serif;font-size:14px;line-height:20px;"><p>Message from {msgData["sender"]} to all {len(admins)} Omnia admins:</p><p>{msgData["msg"]}</p></div>'
})
return (returnMessage, emailSent)
except Exception as ex:
return (f"Error: Admin email failed to send. Error message: {returnMessage}", False) | d44989205c2c60bc618cffcfc9a08ad141f35e4b | 5,140 |
def add(isamAppliance, name, chainItems=[], description=None, check_mode=False, force=False):
"""
Create an STS chain template
"""
if force is False:
ret_obj = search(isamAppliance, name)
if force is True or ret_obj['data'] == {}:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
"name": name,
"chainItems": chainItems
}
if description is not None:
json_data['description'] = description
return isamAppliance.invoke_post(
"Create an STS chain template", uri, json_data,
requires_modules=requires_modules,
requires_version=requires_version)
return isamAppliance.create_return_object() | 7050cfbb052164ed9c570c065b62d5d90609df2c | 5,141 |
def MinHamDistance(pattern, dna_list):
"""Calculate the minimum Hamming distance from a DNA list."""
return sum(HammingDistanceDiffLen(pattern, sequence) for sequence in
dna_list) | 37b1bc96e8a9622060ee6c1361f30df3b69b844f | 5,142 |
import datetime
def _add_note(text: str, user: KarmaUser) -> str:
"""Adds a new note to the database for the given user."""
_, note_msg = _parse_note_cmd(text)
if not note_msg:
return f"Sorry {user.username}, could not find a note in your message."
if _note_exists(note_msg, user):
return f"Sorry {user.username}, you already have an identical note."
note = KarmaNote(
user_id=user.user_id, timestamp=datetime.datetime.now(), note=note_msg
)
session = db_session.create_session()
session.add(note)
session.commit()
return f"Hey {user.username}, you've just stored a note." | 4b81f45c9839a919b41b6f45a09beaf322821211 | 5,143 |
import sys
def get_next_method(generator_instance):
"""
Cross-platform function that retrieves the 'next' method from a generator
instance.
:type generator_instance: Any
:rtype: () -> Any
"""
if sys.version_info > (3, 0):
return generator_instance.__next__
else:
return generator_instance.next | 115bdd13c5afc74d1d5204d004c1034ae6438cb1 | 5,144 |
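A small cross-version usage example with a throwaway generator:
gen = (i * i for i in range(3))
advance = get_next_method(gen)
print(advance())  # 0
print(advance())  # 1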
def process_line(this_line, do_stemming=False, remove_stopwords=False):
"""
Given a line from the CSV file, gets the stemmed tokens.
"""
speech = process_csv_line(this_line)
speech_tokens = process_raw_speech_text(speech.contents, perform_stemming=do_stemming,
delete_stopwords=remove_stopwords)
return speech_tokens | 2730bc7e942a2031f96cc40e889d72cf728bd45a | 5,145 |
def metadef_tag_count(context, namespace_name):
"""Get metadef tag count in a namespace"""
namespace = metadef_namespace_get(context, namespace_name)
_check_namespace_visibility(context, namespace, namespace_name)
count = 0
for tag in DATA['metadef_tags']:
if tag['namespace_id'] == namespace['id']:
count = count + 1
return count | bc863cdbdde5abe4d845f01f49eed1a357e008e4 | 5,146 |
from typing import Literal
def act2graph(graph: Graph, xml_root: Xml, registry: dict,
namespaces: dict, tag: str) -> Graph:
""" Transform activityName tag into RDF graph.
The function transforms the Activity MasterData into identifier. The output
is a RDF graph that represents a part of the Ecoinvent nomenclature
structured with The IEO ontology. The output represents the centrally
registrered identifier (CRID) by the database version and the activity name
identifier, e.g. ecoinvent3.0:88d6c0aa-0053-4367-b0be-05e4b49ff3c5 for the
copper production, primary.
Variables:
- graph: the graph to update
- xml_root: the root of the xml file
- registry: dictionary containing the reference/info of the data registry
- tag: string containing the namespace tag
- namespaces: dictionary containing the namespaces with tags
"""
# crid_reg: CRID registry, e.g Ecoinvent
crid_reg = registry['reg_id']
crid_reg_label = registry['label']
# Database identifier, e.g. EcoInvent3.1
major_release = xml_root.attrib['majorRelease']
minor_release = xml_root.attrib['minorRelease']
database_version = f'v{major_release}_{minor_release}'
database_label = f'{crid_reg_label}{major_release}.{minor_release}'
database_id = crid_reg+database_version
graph.add((ECO[crid_reg], RDFS.label, Literal(crid_reg_label, lang='en')))
graph.add((ECO.activityId, RDFS.subClassOf, ACT_CRID))
activity_id_label = 'EcoInvent activity identifier'
graph.add((ECO.activityId, RDFS.label, Literal(activity_id_label, lang='en')))
graph.add((ECO.activity_name, RDFS.subClassOf, REF_ACTIVITY))
activity_label = 'EcoInvent activity label'
graph.add((ECO.activity_name, RDFS.label, Literal(activity_label, lang='en')))
for activity_name in xml_root.findall(tag, namespaces):
activity_name_id = activity_name.attrib['id']
crid = activity_name_id+database_version
graph.add((ECO[crid], RDF.type, ECO.activityId))
graph.add((ECO[activity_name_id], RDF.type, ECO.activity_name))
# Define the property relation between the symbols of the CRID
graph.add((ECO[crid], BFO.has_part, ECO[database_id]))
graph.add((ECO[database_id], BFO.part_of, ECO[crid]))
graph.add((ECO[crid], BFO.has_part, ECO[activity_name_id]))
graph.add((ECO[activity_name_id], BFO.part_of, ECO[crid]))
# Define the labels with the different languages
xml_ns = namespaces['xml']
for name in activity_name.findall('eco:name', namespaces):
lang = name.attrib['{'+xml_ns+'}lang']
activity_label = name.text
crid_label = f'{database_label}:{activity_label}'
graph.add((ECO[crid], RDFS.label, Literal(crid_label, lang=lang)))
graph.add((ECO[activity_name_id],
RDFS.label,
Literal(activity_label, lang=lang)))
return graph | 899522fa59aa8acf8c0f55377793fc70be6c112b | 5,147 |
from typing import AnyStr
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address)) | 7223c1fa612a1445c5c7d66410b9f34e4c302a74 | 5,148 |
def is_volatile(type):
"""returns True, if type represents C++ volatile type, False otherwise"""
nake_type = remove_alias(type)
return isinstance(nake_type, cpptypes.volatile_t) | d60e4ea471a818b878267e6f6f9a2e05f2728b1c | 5,149 |
def load_adult(as_frame: bool = False):
"""Load and return the higly imbalanced binary classification [adult income datatest](http://www.cs.toronto.edu/~delve/data/adult/desc.html).
you may find detailed description [here](http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html)
"""
with resources.path(
"pytorch_widedeep.datasets.data", "adult.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy() | 432ef18a197dba1a0e64b7606ba2d350fc402f28 | 5,150 |
from typing import Optional
from typing import Union
from typing import Callable
import json
import types
from typing import NoReturn
def sam(
body: Optional[Union[bool,Callable]] = json.loads,
pathParams: Optional[Union[bool,Callable]] = False,
queryString: Optional[Union[bool,Callable]] = False,
headers: Optional[Union[bool,Callable]] = False,
authenticate: Optional[Callable[[dict], types.AuthUser]] = None,
authorize: Optional[Callable[[types.AuthUser], bool]] = None,
jsonize_response: bool = True,
keep_event: bool = False,
keep_context: bool = False,
pass_auth_user: bool = True,
):
"""Wraps an AWS lambda handler function to handle auth, to catch
and handle errors, and to convert lambda handler default parameters
to a functions declared parameters.
:param body: Should the wrapper function pass `event`'s "body"
attribute as an arg to inner function (called "body")? If `body`
is callable, it will be used to parse the values.
For example, if the body is string-ified JSON, you can use `json.loads`
to load the request (or `parsers.json`, a wrapper around `json.loads`).
Or, you could use a `pydantic` model to parse and validate the input.
If this param parsing raises an error, it will be caught and returned
as an `errors.RequestParseError`.
See also other params: `pathParams`, `queryString`, and `headers`.
:param pathParams: Should the wrapper function pass `event`'s "pathParams"
attribute as an arg to inner function (called "path")? If `pathParams`
is callable, it will be used to parse the values.
See also other params: `body`, `queryString`, and `headers`.
:param queryString: Should the wrapper function pass `event`'s "queryString"
attribute as an arg to inner function (called "query")? If `queryString`
is callable, it will be used to parse the values.
See also other params: `pathParams`, `body`, and `headers`.
:param headers: Should the wrapper function pass `event`'s "headers"
attribute as an arg to inner function (called "headers")? If `headers`
is callable, it will be used to parse the values.
See also other params: `pathParams`, `queryString`, and `body`.
:param authenticate: Function to authenticate the requesting user.
Takes the full `event` as an input and returns a User.
:param authorize: Function to authorize the requesting user.
Note: `authenticate` must also be present.
:param jsonize_response: Should the response body be wrapped in JSON?
If so, the response's body will be a string-ified json dict
of the following form: `{"success": true, "result": ...}`
If `jsonize_response` is `True` but the function's signature
shows a return value of `None` or `NoReturn`, and the function
does in fact return `None`, the body will not have a "result"
attribute, only "success".
If `jsonize_response` is `True` and the returned value is a dict,
that value will be merged with a dict: `{"success": True}`
:param keep_event: Should the `event` dict be passed to the
wrapped function from AWS Lambda?
:param keep_context: Should the `context` object be passed to the
wrapped function from AWS Lambda?
:param pass_auth_user: If authentication function supplied,
should `authUser` be passed as a kwarg to the wrapped function?
:returns: Decorated lambda handler function
"""
# Check authorize/authenticate
if authorize is not None:
assert authenticate is not None, "If `authorize` is not `None`, "+\
"`authenticate` can't be `None`."
def wrapper(fn: Callable):
# Get the function's return type, to use later when
# deciding how to format response
return_type = args.get_return_type(fn)
@ft.wraps(fn)
def inner(event: dict, context) -> dict:
# Store function arguments
kwargs = {}
if authenticate is not None:
# Authenticate the user
try:
user = authenticate(event)
except errors.HttpError as e:
return e.json()
if authorize is not None:
# Authorize the user
try:
if not authorize(user):
raise errors.AuthorizationError()
except errors.HttpError as e:
return e.json()
# Does the user want the authorized
# user as an argument?
if pass_auth_user:
kwargs["authUser"] = user
# Get the query/path/body/header params
if body:
try:
kwargs["body"] = body(event["body"]) if callable(body) else event["body"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request body."
)
if pathParams:
try:
kwargs["path"] = pathParams(event["pathParameters"]) if callable(pathParams) \
else event["pathParameters"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request path parameters."
)
if queryString:
try:
kwargs["query"] = queryString(event["queryStringParameters"]) if callable(queryString) \
else event["queryStringParameters"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request query string parameters."
)
if headers:
try:
kwargs["headers"] = headers(event["headers"]) if callable(headers) else event["headers"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request headers."
)
# Add event/context if requested
if keep_event:
kwargs["event"] = event
if keep_context:
kwargs["context"] = context
# Call the function
try:
res = fn(**kwargs)
except errors.HttpError as e:
return e.json()
except Exception as e:
print(f"UNCAUGHT ERROR: \"{e}\"")
return errors.InternalServerError().json()
# Return a response
if jsonize_response:
# If there isn't a return (as expected)
# just return the success-ness
if res is None and return_type in (None, NoReturn):
return {
"statusCode": 200,
"body": json.dumps({
"success": True,
})
}
# If the response is a dict, merge
# it with the `success`-ness flag
if isinstance(res, dict):
return {
"statusCode": 200,
"body": json.dumps({
"success": True,
**res
})
}
# Otherwise (if result isn't a dict)
# return it as the value to key "result"
return {
"statusCode": 200,
"body": json.dumps({
"success": True,
"result": res,
})
}
else:
# If not json-izing the response, pass
# it as the value to the key "body"
# (still with a status-code of 200)
return {
"statusCode": 200,
"body": res
}
return inner
return wrapper | 4ccdbfc843fd07197819fae730faec97dc2316f7 | 5,151 |
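# A usage sketch for the `sam` decorator above, assuming it and the package's
# `errors`/`args` helper modules are importable; the handler name and payload
# are illustrative only.
import json

@sam(body=json.loads, queryString=True)
def create_item(body, query):
    # `body` arrives already parsed by json.loads; `query` is the raw
    # queryStringParameters dict from the API Gateway event.
    return {"item": body, "filters": query}

event = {
    "body": json.dumps({"name": "widget"}),
    "queryStringParameters": {"verbose": "true"},
}
print(create_item(event, None))  # {'statusCode': 200, 'body': '{"success": true, ...}'}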
import logging
logger = None
def get_logger(name=None):
"""return a logger
"""
global logger
if logger is not None: return logger
print('Creating logger========================================>')
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s][%(levelname)s]{%(pathname)s:%(lineno)d} %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger | 34e0aa41b3e8c878574e1ab57eff41238b291672 | 5,152 |
import re
def LF_degen_spine(report):
"""
Checking for degenerative spine
"""
reg_01 = re.compile('degen',re.IGNORECASE)
reg_02 = re.compile('spine',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL | d0211476a3f179c26648546c21176866bad7c61e | 5,153 |
def make_log_format(fields, sep=" - "):
"""
Build a custom log format, as accepted by the logging module, from a list of field names.
:param fields: list or tuple of str - names of fields to use in log messages
:param sep: str - separator to put between fields. Default is ' - '
:return: a log format string usable to configure log formatters
"""
assert all(f in log_fields for f in fields), "Only fields from {} are valid".format(
tuple(log_fields)
)
return sep.join("%({}){}".format(f, log_fields[f]) for f in fields) | 7e05f4bced180ef98025576e9fa1b2cf4f296b92 | 5,154 |
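# Hypothetical use of make_log_format, assuming the module-level `log_fields`
# mapping defines format suffixes for these standard logging fields.
import logging

fmt = make_log_format(("asctime", "levelname", "message"), sep=" | ")
logging.basicConfig(format=fmt, level=logging.INFO)
logging.getLogger(__name__).info("formatter configured")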
from collections import defaultdict
def tweets_for(type, args, per_user=None):
"""
Retrieve tweets for a user, list or search term. The optional
``per_user`` arg limits the number of tweets per user, for
example to allow a fair spread of tweets per user for a list.
"""
lookup = {}
lookup[type] = args[0].strip("\"'")
tweets = Tweet.objects.get_for(**lookup)
if per_user is not None:
_tweets = defaultdict(list)
for tweet in tweets:
if len(_tweets[tweet.user_name]) < per_user:
_tweets[tweet.user_name].append(tweet)
tweets = sum(_tweets.values(), [])
tweets.sort(key=lambda t: t.created_at, reverse=True)
if len(args) > 1 and args[-1].isdigit():
tweets = tweets[:int(args[-1])]
return tweets | ae393d887de9d87a13c3d46a30bcc08d78867827 | 5,155 |
import numpy as np
from scipy.sparse import issparse
def sum_var(A):
"""summation over axis 1 (var) equivalent to np.sum(A, 1)"""
if issparse(A):
return A.sum(1).A1
else:
return np.sum(A, axis=1) if A.ndim > 1 else np.sum(A) | af866cb018a46746456efdb2e0c013a6410f9be4 | 5,156 |
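# Quick check of sum_var on dense and sparse inputs, assuming the function
# above is in scope.
import numpy as np
from scipy.sparse import csr_matrix

dense = np.array([[1.0, 2.0], [3.0, 4.0]])
print(sum_var(dense))              # [3. 7.]
print(sum_var(csr_matrix(dense)))  # [3. 7.]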
def success_schema():
"""Pytest fixture for successful SchemaModel object"""
scm = SchemaVersion("1.0")
scm.success = True
return scm | c7a918a1be0d77607bccdedf80c3acaf5a56bd32 | 5,157 |
def _interfaces(config):
""" list system interfaces based on shape """
shape = lib.metadata.get_instance()['shape']
    print()
if config.getboolean('DEFAULT', 'auto') is True:
interfaces = lib.interfaces.get_interfaces_by_shape(shape)
else:
interfaces = config['DEFAULT']['interfaces'].split(',')
return interfaces | 7ea4d493293d910532b514edf4ec7efee2253a34 | 5,158 |
def getColumninfo(columns):
"""
See ElementFaceToThickness.
"""
ColumnC, problematicColumns = ElementFaceToThickness(columns)
return ColumnC | 985fbdabf95932ae4a8b57169ad6e1aaaa36f146 | 5,159 |
from typing import Any
from typing import Optional
def script(
command: str, inputs: Any = [], outputs: Any = NULL, tempdir=False, **task_options
) -> Any:
"""
Execute a shell script as a redun task with file staging.
"""
if outputs == NULL:
outputs = File("-")
command_parts = []
# Prepare tempdir if requested.
temp_path: Optional[str]
if tempdir:
temp_path = mkdtemp(suffix=".tempdir")
command_parts.append('cd "{}"'.format(temp_path))
else:
temp_path = None
# Stage inputs.
command_parts.extend(input.render_stage() for input in iter_nested_value(inputs))
# User command.
command_parts.append(get_wrapped_command(prepare_command(command)))
# Unstage outputs.
file_stages = [value for value in iter_nested_value(outputs) if isinstance(value, Staging)]
command_parts.extend(file_stage.render_unstage() for file_stage in file_stages)
full_command = "\n".join(command_parts)
# Get input files for reactivity.
def get_file(value: Any) -> Any:
if isinstance(value, Staging):
# Staging files and dir turn into their remote versions.
cls = type(value.remote)
return cls(value.remote.path)
else:
return value
input_args = map_nested_value(get_file, inputs)
return _script(
full_command, input_args, outputs, task_options=task_options, temp_path=temp_path
) | fb7b404d7d46680240863778b541afa83dec4528 | 5,160 |
import requests
def get_forms(console: Console, sess: requests.Session, form_id: str = "General_Record_2020v2.0"):
"""
Method to get every form for a given FormID
"""
raw_resp = get_url(url=f"https://forms.agterra.com/api/{form_id}/GetAll/0", sess=sess)
if raw_resp.status_code != 200:
console.log(f"[red] Something went wrong, we got status [white]{raw_resp.status_code}")
json_data = raw_resp.json()
console.log(f"Message Data: {json_data}")
json_data = raw_resp.json()
return json_data | 129a8789a51db7a6e043fe6c8fbb30c1af984a74 | 5,161 |
def load_dataset(input_files,
input_vocab,
mode,
batch_size=32,
min_seq_len=5,
num_buckets=4):
"""Returns an iterator over the training data."""
def _make_dataset(text_files, vocab):
dataset = tf.data.TextLineDataset(text_files)
dataset = dataset.map(lambda x: tf.string_split([x]).values)
dataset = dataset.map(vocab.lookup)
return dataset
def _key_func(x):
if mode == constants.TRAIN:
bucket_width = 6
bucket_id = x["length"] // bucket_width
bucket_id = tf.minimum(bucket_id, num_buckets)
return tf.to_int64(bucket_id)
else:
return 0
def _reduce_func(unused_key, dataset):
return dataset.padded_batch(batch_size,
padded_shapes={
"ids": [None],
"ids_in": [None],
"ids_out": [None],
"ids_in_out": [None],
"length": [], },
)
bos = tf.constant([constants.START_OF_SENTENCE_ID], dtype=tf.int64)
eos = tf.constant([constants.END_OF_SENTENCE_ID], dtype=tf.int64)
# Make a dataset from the input and translated file.
input_dataset = _make_dataset(input_files, input_vocab)
dataset = tf.data.Dataset.zip(input_dataset)
if mode == constants.TRAIN:
dataset = dataset.shuffle(200000)
# Define the input format.
dataset = dataset.map(lambda x: {
"ids": x,
"ids_in": tf.concat([bos, x], axis=0),
"ids_out": tf.concat([x, eos], axis=0),
"ids_in_out": tf.concat([bos, x, eos], axis=0),
"length": tf.shape(x)[0]})
# Filter out invalid examples.
if mode == constants.TRAIN:
dataset = dataset.filter(lambda x: tf.greater(x["length"], min_seq_len - 1))
# Batch the dataset using a bucketing strategy.
dataset = dataset.apply(tf.contrib.data.group_by_window(
_key_func,
_reduce_func,
window_size=batch_size))
return dataset.make_initializable_iterator() | b71c6c8aa1bd2143c911fdd9e7e4ec1526656a39 | 5,162 |
def _get_results(**kwargs):
"""
Generate a command with the parameters, run it, and return the
normalized results
"""
output, error, rc = testoob.run_cmd.run_command(_generate_command(**kwargs))
return tt._normalize_newlines(output), tt._normalize_newlines(error), rc | 83dc64973fe4cfafd56391186361d3dbcc485f7d | 5,163 |
def infer_from_discretized_mix_logistic(params):
"""
Sample from discretized mixture of logistic distributions
Args:
params (Tensor): B x C x T, [C/3,C/3,C/3] = [logit probs, means, log scales]
Returns:
Tensor: sample in range of [-1, 1].
"""
log_scale_min = float(np.log(1e-14))
assert params.shape[1] % 3 == 0
nr_mix = params.shape[1] // 3
# B x T x C
y = params #np.transpose(params, (1, 0))
logit_probs = y[:, :nr_mix]
temp = np.random.uniform(low=1e-5, high=1.0 - 1e-5, size=logit_probs.shape)
temp = logit_probs - np.log(- np.log(temp))
argmax = np.argmax(temp, axis=-1)
one_hot = get_one_hot(argmax, nr_mix).astype(dtype=float)
means = np.sum(y[:, nr_mix:2 * nr_mix] * one_hot, axis=-1)
log_scales = np.clip(np.sum(
y[:, 2 * nr_mix:3 * nr_mix] * one_hot, axis=-1), a_min=log_scale_min, a_max=None)
u = np.random.uniform(low=1e-5, high=1.0 - 1e-5, size=means.shape)
x = means + np.exp(log_scales) * (np.log(u) - np.log(1. - u))
x = np.clip(x, a_min=-1., a_max=1.)
return x | 993e5c64abd0b623057256b868c7e94570e28574 | 5,164 |
from pathlib import Path
import glob
import os
from natsort import natsorted
def get_sorted_file_paths(file_path, file_extension=None, encoding=None):
"""
Sorts file paths with numbers "naturally" (i.e. 1, 2, 10, a, b), not
lexiographically (i.e. 1, 10, 2, a, b).
:param str file_path: File containing file_paths in a text file,
or as a list.
:param str file_extension: Optional file extension (if a directory
is passed)
:param encoding: If opening a text file, what encoding it has.
Default: None (platform dependent)
:return: Sorted list of file paths
"""
if isinstance(file_path, list):
return natsorted(file_path)
# assume if not a list, is a file path
file_path = Path(file_path)
if file_path.suffix == ".txt":
return get_text_lines(file_path, sort=True, encoding=encoding)
elif file_path.is_dir():
if file_extension is None:
file_path = glob.glob(os.path.join(file_path, "*"))
else:
file_path = glob.glob(
os.path.join(file_path, "*" + file_extension)
)
return natsorted(file_path)
else:
message = (
"Input file path is not a recognised format. Please check it "
"is a list of file paths, a text file of these paths, or a "
"directory containing image files."
)
raise NotImplementedError(message) | 75c715ce7e4c34e528cf08b7814d5706a6c6b990 | 5,165 |
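# Natural-sort behaviour of get_sorted_file_paths on a plain list; no
# filesystem access is needed for the list case.
print(get_sorted_file_paths(["img10.png", "img2.png", "img1.png"]))
# ['img1.png', 'img2.png', 'img10.png']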
from pathlib import Path
def _load_reft_data(reft_file, index_name="btl_fire_num"):
"""
Loads reft_file to dataframe and reindexes to match bottle data dataframe
"""
reft_data = pd.read_csv(reft_file, usecols=["btl_fire_num", "T90", "REFTMP_FLAG_W"])
    reft_data.set_index(index_name, inplace=True)
reft_data["SSSCC_TEMP"] = Path(reft_file).stem.split("_")[0]
reft_data["REFTMP"] = reft_data["T90"]
return reft_data | c9ae2a9d5212f5d9234fc95fb4cc008688db07b4 | 5,166 |
def commit_veto(environ, status, headers):
"""Veto a commit.
This hook is called by repoze.tm in case we want to veto a commit
for some reason. Return True to force a rollback.
By default we veto if the response's status code is an error code.
Override this method, or monkey patch the instancemethod, to fine
tune this behaviour.
"""
return not 200 <= int(status.split(None, 1)[0]) < 400 | 9fc96fe8cdbedde20cb325e189b71d9df94cf176 | 5,167 |
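# commit_veto vetoes (returns True) for error status codes and lets
# successful responses through.
print(commit_veto({}, "500 Internal Server Error", []))  # True
print(commit_veto({}, "302 Found", []))                  # False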
from functools import wraps
from django.http import JsonResponse
def rate_limited_api(view_func):
"""
Checks users last post to rate limited endpoints
(adding comments or recipes) and rejects if within timeout period
for api requests (returns JSON response)
"""
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
exceeded, msg = request.user.profile.rate_limit_exceeded()
if exceeded:
return JsonResponse({"error": msg})
else:
return view_func(request, *args, **kwargs)
return _wrapped_view | 3db16cd742339015efbbb9016a32a91e902453a3 | 5,168 |
from .tvdfunctions import CalculateTVD
from .backend import fetchoptions as fo
from .backend.exceptions import TVDLimiterFunctionInputError
def SecondOrderTVD(Uo, Courant, diffX, LimiterFunc, Limiter, Eps=0.01):
"""Return the numerical solution of dependent variable in the model eq.
This function uses the
explicit second-order TVD method and their various
Limiter functions and Limiters
to obtain the solution of the 1D non-linear viscous Burgers equation.
Call signature:
SecondOrderTVD(Uo, Courant, diffX, LimiterFunc, Limiter, Eps)
Parameters
----------
Uo: ndarray[float], =1d
The dependent variable at time level, n within the entire domain.
(Non-dimensionalized quantity)
Courant: float
Courant number that appears in the convection component of the PDE.
diffX: float
Diffusion number for x-component that appears in the diffusion
component of the PDE.
LimiterFunc: str
Flux limiter function.
    Limiter: str
        Limiter type.
    Eps: float, optional
        A positive constant in the entropy correction term, si in Eq. 6-127
        in CFD Vol. 1 by Hoffmann. Its value must be between 0 and 0.125.
        Default is 0.01.
Returns
-------
U: ndarray[float], =1d
The dependent variable at time level, n+1 within the entire domain.
(Non-dimensionalized quantity)
"""
shapeU = Uo.shape # Obtain Dimension
if len(shapeU) == 2:
        raise DimensionError("2D", "viscous Burgers", "second-order TVD")
iMax, = shapeU
U = Uo.copy() # Initialize U
E = Uo*Uo/2
fetch = fo.FetchOptions()
limfunc_options = fetch.TVDLimiterFunctionOptions()
if LimiterFunc not in limfunc_options:
raise TVDLimiterFunctionInputError(LimiterFunc)
for i in range(2, iMax-2):
phiPlus, phiMinus = CalculateTVD(i, Uo, E, Eps, Courant,
Limiter, LimiterFunc)
# Equation 6-124 and 6-125 in Hoffmann Vol. 1
hPlus = 0.5 * (E[i+1]+E[i]+phiPlus)
hMinus = 0.5 * (E[i]+E[i-1]+phiMinus)
        # Calculate diffusion terms in the viscous Burgers equation.
# Equation 7-58
diffusion = diffX*(Uo[i+1] - 2.0*Uo[i] + Uo[i-1])
# Equation 6-123
U[i] = Uo[i] - Courant*(hPlus-hMinus) + diffusion
return U | 3433d3af49d1972868af7e21f02249c82de1a549 | 5,169 |
def login_required(func):
""" Allow only auth users """
async def wrapped(self, *args, **kwargs):
if self.request.user is None:
add_message(self.request, "LogIn to continue.")
redirect(self.request, "sign_in")
return await func(self, *args, **kwargs)
return wrapped | 80837caa726ce46e4728141208a575b25fe5dcb6 | 5,170 |
def _hunnyb_search_func(name):
"""search function required by ``codecs.register``"""
if name in (HUNNYB_ENC_NAME,) + HB_ALIASES:
return (_encode, _decode, None, None) | c9b1a6b68da2706d7568858d7211593e0bfa4086 | 5,171 |
from hashlib import md5
def fingerprint_file(file):
"""Open, read file and calculate MD5 on its contents"""
with open(file,'rb') as fd:
# read contents of the file
_file_data = fd.read()
# pipe contents of the file through
file_fingerprint = md5(_file_data).hexdigest()
return file_fingerprint | 030412ad6a057b2cd2aae4032e6122df73817e41 | 5,172 |
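# Example of fingerprint_file on a throwaway temporary file.
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
print(fingerprint_file(tmp.name))  # 5eb63bbbe01eeed093cb22bb8f5acdc3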
def toOTLookup(self, font, ff):
"""Converts a fontFeatures.Routine object to binary.
Args:
font: A ``TTFont`` object.
ff: The parent ``FontFeatures`` object containing this routine.
Returns a list of ``fontTools.otlLib.builder`` Builder objects allowing this
routine to be converted to binary layout format.
"""
lookuptypes = [x.lookup_type() for x in self.rules]
if not all([lu == lookuptypes[0] for lu in lookuptypes]):
raise ValueError("For now, a routine can only contain rules of the same type")
if not all([self.rules[0].flags == rule.flags for rule in self.rules]):
raise ValueError("For now, a routine can only contain rules of the same flags")
self.flags = self.rules[0].flags
if self.stage == "pos":
return buildPos(self, font, lookuptypes[0], ff)
if self.stage == "sub":
return buildSub(self, font, lookuptypes[0], ff) | ea08870cfec146135584bb8e85f2e861adfa3e05 | 5,173 |
def apply_to_all(func, results, datasets):
"""Apply the given function to all results
Args:
func: the function to apply
results: nested dictionary where the nested levels are: algorithm name, sensitive attribute
and split ID
datasets: nested dictionary where the nested levels are: sensitive attribute and split ID
Returns:
a nested dictionary with the same structure as `results` that contains the output of the
given function
"""
output = {}
for algo in results:
output[algo] = {}
for sensitive in results[algo]:
output[algo][sensitive] = {}
for split_id in results[algo][sensitive]:
output[algo][sensitive][split_id] = func(
results[algo][sensitive][split_id], datasets[sensitive][split_id])
return output | 6ea085b3541a84ac97f63389ba83c3a06d5e0b85 | 5,174 |
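# Illustrative call of apply_to_all on tiny nested dicts; the metric function
# here is invented for the example.
results = {"svm": {"sex": {0: [1.0, 0.5]}}}
datasets = {"sex": {0: [1, 0]}}

def mean_score(result, dataset):
    return sum(result) / len(result)

print(apply_to_all(mean_score, results, datasets))  # {'svm': {'sex': {0: 0.75}}}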
def any_value_except(mapping, excluded_keys):
"""Return a random value from a dict that is not associated with
excluded_key. Raises StopIteration if there are no other keys than
excluded_key"""
return next(mapping[key] for key in mapping if key not in excluded_keys) | 8d633713b93cfd1f0324d5c4a56a18fa7931ff06 | 5,175 |
import torch
def one_hot(y, num_dim=10):
"""
One Hot Encoding, similar to `torch.eye(num_dim).index_select(dim=0, index=y)`
:param y: N-dim tenser
:param num_dim: do one-hot labeling from `0` to `num_dim-1`
:return: shape = (batch_size, num_dim)
"""
one_hot_y = torch.zeros(y.size(0), num_dim)
if y.is_cuda:
one_hot_y = one_hot_y.cuda()
return one_hot_y.scatter_(1, y.view(-1, 1), 1.) | 694bfea18ecbb5c5737e0d38c0aa0f5f52a82a55 | 5,176 |
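# Small demonstration of one_hot on a CPU tensor of class indices, assuming
# the function above is in scope.
import torch

labels = torch.tensor([2, 0, 1])
print(one_hot(labels, num_dim=4))
# tensor([[0., 0., 1., 0.],
#         [1., 0., 0., 0.],
#         [0., 1., 0., 0.]])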
import os
def relate(target_file, start_file):
"""
Returns relative path of target-file from start-file.
"""
# Default os.path.rel_path takes directories as argument, thus we need
# strip the filename if present in the paths else continue as is.
target_dir, target_base = os.path.split(target_file)
start_dir = os.path.dirname(start_file)
# Calculate the relative path using the standard module and then concatenate
# the file names if they were previously present.
return os.path.join(os.path.relpath(target_dir, start_dir), target_base) | 0882a3a4c804816096eb6e9c9a0c18dac5a55b57 | 5,177 |
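# relate computes the path of the first file relative to the directory of the
# second; on a POSIX system this prints ../static/css/site.css
print(relate("/project/static/css/site.css", "/project/templates/index.html"))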
def IdentityMatrix():
"""Creates an identity rotation matrix.
Returns a rotation matrix that has no effect on orientation.
This matrix can be the starting point for other operations,
such as using a series of calls to #Pivot to
create a custom rotation matrix.
Returns
-------
RotationMatrix
The identity rotation matrix.
"""
return RotationMatrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
]) | f156a67000fb36360134d3c696dc9caefebf736a | 5,178 |
import numpy as np
def compute_K_from_vanishing_points(vanishing_points):
"""Compute intrinsic matrix given vanishing points.
Args:
vanishing_points: A list of vanishing points.
Returns:
K: The intrinsic camera matrix (3x3 matrix).
"""
# vanishing points used
v1 = vanishing_points[0]
v2 = vanishing_points[1]
v3 = vanishing_points[2]
# construct constraint matrix A from each pair of vanishing points
A = np.zeros((3, 3))
# 1 + 2
vi = v1
vj = v2
A[0] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])])
# 1 + 3
vi = v1
vj = v3
A[1] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])])
# 2 + 3
vi = v2
vj = v3
A[2] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])])
# add one column of ones
A_ones = np.ones((A.shape[0], 1))
A = np.hstack((A, A_ones))
# SVD
U, s, VT = np.linalg.svd(A)
w = VT[-1, :]
omega = np.array([[w[0], 0, w[1]],
[0, w[0], w[2]],
[w[1], w[2], w[3]]])
# find K matrix from omega
KT_inv = np.linalg.cholesky(omega)
K = np.linalg.inv(KT_inv.T)
# normalize
K /= K[2, 2]
return K | 972cba32caee46d9d9c7ed30a3f4ad23bfafe070 | 5,179 |
import os
def getProgFromFile(f):
"""Get program name from __file__.
"""
if f.endswith(".py"):
f = f[:-3]
return os.path.basename(f) | 474c9b3f2bef2117daf8456d8b6f026d738182a1 | 5,180 |
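# getProgFromFile strips a trailing .py and returns the basename.
print(getProgFromFile("/usr/local/bin/tool.py"))  # tool
print(getProgFromFile("backup.sh"))               # backup.sh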
import numpy as np
def _tpd2vec(seq, dtype=float):
"""
Convert a tpd file string to a vector, return a NumPy array.
EXAMPLES:
>>> _tpd2vec('1|13|4; 20; 25|28')
array([ 1., 5., 9., 13., 20., 25., 26., 27., 28.])
>>> _tpd2vec('5.5; 1.2@3; 3|7|2')
array([ 5.5, 1.2, 1.2, 1.2, 3. , 5. , 7. ])
>>> _tpd2vec(' ')
array([], dtype=float64)
"""
finalvec = np.array([], dtype)
for s in seq.split(';'):
if s.count('|'):
values = [dtype(v) for v in s.split('|')]
values[1] += 1
vec = np.arange(*values)
elif s.count('@'):
value, num = s.split('@')
try:
vec = np.ones(int(num)) * dtype(value)
except ValueError:
raise ValueError('%s is incorrectly specified' % seq)
else:
try:
vec = [dtype(s)]
except ValueError:
vec = np.array([], dtype)
finalvec = np.append(finalvec, vec)
return finalvec | c561852d27025fc4f7db7f027fba0e18b2ca157c | 5,181 |
from typing import Dict
def get_notification_html(*, notification_type: str, options: Dict, sender: str) -> str:
"""
Returns the formatted html for the notification based on the notification_type
:return: A string representing the html markup to send in the notification
"""
validate_options(options=options)
url_base = app.config['FRONTEND_BASE']
resource_url = '{url_base}{resource_path}?source=notification'.format(resource_path=options.get('resource_path'),
url_base=url_base)
joined_chars = resource_url[len(url_base) - 1:len(url_base) + 1]
if joined_chars.count('/') != 1:
raise Exception('Configured "FRONTEND_BASE" and "resource_path" do not form a valid url')
notification_strings = NOTIFICATION_STRINGS.get(notification_type)
if notification_strings is None:
raise Exception('Unsupported notification_type')
greeting = 'Hello,<br/>'
notification = notification_strings.get('notification', '').format(resource_url=resource_url,
resource_name=options.get('resource_name'),
sender=sender)
comment = notification_strings.get('comment', '')
end_note = notification_strings.get('end_note', '')
salutation = '<br/>Thanks,<br/>Amundsen Team'
if notification_type == NotificationType.METADATA_REQUESTED:
options_comment = options.get('comment')
need_resource_description = options.get('description_requested')
need_fields_descriptions = options.get('fields_requested')
if need_resource_description and need_fields_descriptions:
notification = notification + 'and requests improved table and column descriptions.<br/>'
elif need_resource_description:
notification = notification + 'and requests an improved table description.<br/>'
elif need_fields_descriptions:
notification = notification + 'and requests improved column descriptions.<br/>'
else:
notification = notification + 'and requests more information about that resource.<br/>'
if options_comment:
comment = ('<br/>{sender} has included the following information with their request:'
'<br/>{comment}<br/>').format(sender=sender, comment=options_comment)
if notification_type == NotificationType.DATA_ISSUE_REPORTED:
greeting = 'Hello data owner,<br>'
data_issue_url = options.get('data_issue_url')
comment = comment.format(data_issue_url=data_issue_url)
return '{greeting}{notification}{comment}{end_note}{salutation}'.format(greeting=greeting,
notification=notification,
comment=comment,
end_note=end_note,
salutation=salutation) | 7996c8f472de89498b04ed6563b893381f680209 | 5,182 |
def step(parents: be.Population, fitness: be.Fitness) -> tuple:
"""
The step function defines how an algorithm generation will be conducted. This function must receive a population and
a fitness object and return another population. In this case we will define the parameters of the algorithm within
the function itself and use report objects to monitor the evolution of the population.
In this algorithm the main steps consists of:
1. Get elite -> Elite
2. Apply tournament selection -> Best individuals
3. Apply one point cross over to best individuals -> Offspring
4. Mutate offspring
5. Evaluate offspring
6. Annihilate worst individuals in offspring and replace them with the best.
7. Merge elite and offspring -> Population for next generation
"""
# Put parameters
recombination_schema = 'one_point_i' # Alternatives: 'n_point_i' or 'uniform_i'
mutation_schema = 'random_resetting' # Alternatives: 'creep'
mutation_probability = 0.1
max_mutation_events = 2
ranking_selection_schema = 'tournament' # Alternatives: 'roulette' or 'sus'
tournament_k = 2
tournament_w = 1
tournament_replacement = False
elitism_percentage = 0.1
# Get elite
elite = be.survivor_selection(population=parents, schema='elitism', select=elitism_percentage)
# Apply selection to get the mating pool
mating_pool = be.ranking_selection(
population=parents, n=len(parents) - len(elite), schema=ranking_selection_schema,
w=tournament_w, k=tournament_k, replacement=tournament_replacement)
# Generate offspring
offspring = be.recombination(population=mating_pool, n=len(mating_pool), schema=recombination_schema)
# Mutate offspring
be.mutation(population=offspring, probability=mutation_probability, schema=mutation_schema,
max_mutation_events=max_mutation_events)
# Evaluate offspring
be.evaluate_parallel(population=offspring, fitness_function=fitness)
# Merge elite and offspring
next_generation = be.merge_populations(offspring, elite)
report.create_report(population=next_generation, population_name='Population', increment_generation=True)
# With this indicator we keep the best solution of each generation
return next_generation, be.SaveBestSolution(next_generation) | 700c5a9a28145b9454fc68356eab328a84418461 | 5,183 |
def asset_dividend_record(self, **kwargs):
"""Asset Dividend Record (USER_DATA)
Query asset dividend record.
GET /sapi/v1/asset/assetDividend
https://binance-docs.github.io/apidocs/spot/en/#asset-dividend-record-user_data
Keyword Args:
asset (str, optional)
startTime (int, optional)
endTime (int, optional)
limit (int, optional): Default 20, max 500
recvWindow (int, optional): The value cannot be greater than 60000
"""
return self.sign_request("GET", "/sapi/v1/asset/assetDividend", kwargs) | 80ecbf4f03bb4431829130f3da546b937cf53d13 | 5,184 |
def heuristic(node_1, node_2):
""" Heuristic when only 4 directions are posible (Manhattan) """
(x_node_1, y_node_1) = node_1
(x_node_2, y_node_2) = node_2
return abs(x_node_1 - x_node_2) + abs(y_node_1 - y_node_2) | e431ed9d8a7acb34604b3e83c3f3d7774cd27d51 | 5,185 |
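# Manhattan-distance check for heuristic.
print(heuristic((0, 0), (3, 4)))  # 7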
def exercise_2(inputs): # DO NOT CHANGE THIS LINE
"""
Output should be the name of the class.
"""
output = Party
return output # DO NOT CHANGE THIS LINE | 6bf574921760aa2569d0a44ced8b9a3712d67faa | 5,186 |
import cv2
def undistort(img, mtx, dist):
"""Undistort an image using camera matrix and distortion coefficients"""
h, w = img.shape[:2]
    # return undistorted image with minimum unwanted pixels. It's okay to remove some pixels at image corners.
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 0, (w,h))
undist = cv2.undistort(img, mtx, dist, None, newcameramtx)
return undist | e8d32a8662a998c90f856116b97e555f2bdfeee4 | 5,187 |
def get_order(order_id, sandbox=False):
"""Get a single order using the Sell Fulfillment API."""
return single_api_call('sell_fulfillment_get_order', order_id=order_id,
field_groups='TAX_BREAKDOWN', sandbox=sandbox) | 74054dc63e6d57f162f6099389fa9c1870d8e08d | 5,188 |
def extend_dict(x, *y):
"""Similar to Object.assign() / _.extend() in Javascript, using
'dict.update()'
Args:
x (dict): the base dict to merge into with 'update()'
*y (dict, iter): any number of dictionary or iterable key/value
pairs to be sequentially merged into 'x'. Skipped if None.
"""
z = x.copy()
for d in [d for d in y if d is not None]:
z.update(d)
return z | f10a5bc7d5ed3646e6a9f8f9535a16bd800c7fcd | 5,189 |
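# extend_dict merges later dicts into a copy of the first and skips None.
base = {"a": 1, "b": 2}
print(extend_dict(base, {"b": 3}, None, {"c": 4}))  # {'a': 1, 'b': 3, 'c': 4}
print(base)                                         # {'a': 1, 'b': 2} (unchanged)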
def ErrorCriteria(errors):
"""Monitor the number of unexpected errors logged in the cluster. If more than five
errors have occurred on the cluster during this time period, post an alert. Posts a
warning if between one and four errors have occurred.
"""
ERROR_ALERT_THRESHOLD = 5
alerts = []
warnings = []
if errors['cluster_total'] > ERROR_ALERT_THRESHOLD:
alerts.append(CLUSTER_TOKEN)
elif errors['cluster_total'] > 0:
warnings.append(CLUSTER_TOKEN)
return alerts, warnings | 0b388ca55009bb5219bd30ead91ce67521c0e743 | 5,190 |
def bdnyc_skyplot():
"""
Create a sky plot of the database objects
"""
# Load the database
db = astrodb.Database('./database.db')
t = db.query('SELECT id, ra, dec, shortname FROM sources', fmt='table')
# Convert to Pandas data frame
data = t.to_pandas()
data.index = data['id']
# Remove objects without RA/Dec
num_missing = np.sum(pd.isnull(data['ra']))
if num_missing > 0:
warning_message = 'Note: {} objects had missing coordinate information and were removed.'.format(num_missing)
data = data[pd.notnull(data['ra'])]
else:
warning_message = ''
# Coerce to numeric
data['ra'] = pd.to_numeric(data['ra'])
data['dec'] = pd.to_numeric(data['dec'])
# Coordinate conversion
c = SkyCoord(ra=data['ra'] * u.degree, dec=data['dec'] * u.degree)
pi = np.pi
proj = 'hammer'
data['x'], data['y'] = projection(c.ra.radian - pi, c.dec.radian, use=proj)
data['l'], data['b'] = c.galactic.l, c.galactic.b
# Make the plots
p1 = make_sky_plot(data, proj)
data['x'], data['y'] = projection(c.galactic.l.radian - pi, c.galactic.b.radian, use=proj)
p2 = make_sky_plot(data, proj)
tab1 = Panel(child=p1, title="Equatorial")
tab2 = Panel(child=p2, title="Galactic")
tabs = Tabs(tabs=[tab1, tab2])
script, div = components(tabs)
return render_template('skyplot.html', script=script, plot=div, warning=warning_message) | 7ceba0d0b5cf151e5629fefa943fa1a48f62d430 | 5,191 |
def get_model_config(model_name, dataset, params):
"""Map model name to model network configuration."""
model_map = _get_model_map(dataset.name)
if model_name not in model_map:
raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
(model_name, dataset.name))
else:
return model_map[model_name](params=params) | 88ce2fbb3415b0d5fa2348b9f9ba5dd029e49a73 | 5,192 |
def post_search(request):
"""Allow text matching search. """
form = SearchForm()
query = None
results = []
if 'query' in request.GET: # check if result is submitted by looking for query
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
# results = Post.objects.annotate(search=SearchVector('title','body'),).filter(search=query)
# a search is more relevant if the search term is in the title
"""
Search weights are D,C,B and A corresponding to 0.1,0.2,0.4 and 1.0
"""
search_vector = SearchVector('title', weight='A') + SearchVector('body',weight='B')
search_query = SearchQuery(query)
# filter results to display only the ones ranking higher than 0.3
results = Post.objects.annotate(search=search_vector,rank=SearchRank(search_vector,search_query)
).filter(rank__gte=0.3).order_by('-rank')
return render(request,'blog/post/search.html', {'form':form, 'query':query, 'results':results}) | ff6f36f28a0dbaaba8957049eb2fc64ff76470dc | 5,193 |
import numba
def PrimacyCodingNumeric_receptor_activity_monte_carlo_numba_generator(conc_gen):
""" generates a function that calculates the receptor activity for a given
concentration generator """
func_code = receptor_activity_monte_carlo_numba_template.format(
CONCENTRATION_GENERATOR=conc_gen)
# make sure all necessary objects are in the scope
scope = {'np': np, 'nlargest_indices_numba': nlargest_indices_numba}
exec(func_code, scope)
func = scope['function']
return numba.jit(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)(func) | 8ca9758227fe6b7e57269e929a6a7dc4a7d6b549 | 5,194 |
def subtoken_counts(proposed, ground_truth):
"""
Compute the number of precise tokens, proposed tokens and ground truth tokens
from two strings representing tokens.
"""
gt_subtokens = set(compute_subtokens(ground_truth))
proposed_subtokens = set(compute_subtokens(proposed))
precise_subtokens = proposed_subtokens.intersection(gt_subtokens)
return len(precise_subtokens), len(proposed_subtokens), len(gt_subtokens) | 496abf452a09c521b71acfe2951232b5a4c7b40d | 5,195 |
import random
def welcome():
""" Define welcome reply """
hello = random.choice(_HELLO_)
nick = random.choice(_NICK_NAME_)
welcome = random.choice(_WELCOME_)
proposal = random.choice(_PROPOSAL_)
return hello + " " + nick + ", " + welcome + " ! " + proposal + " ?" | 87da460bde7bae59e54c108a68291d8c3b4258de | 5,196 |
def EDCN(linear_feature_columns,
dnn_feature_columns,
bridge_type='attention_pooling',
tau=0.1,
use_dense_features=True,
cross_num=2,
cross_parameterization='vector',
l2_reg_linear=1e-5,
l2_reg_embedding=1e-5,
l2_reg_cross=1e-5,
l2_reg_dnn=0,
seed=10000,
dnn_dropout=0,
dnn_use_bn=False,
dnn_activation='relu',
task='binary'):
"""Instantiates the Enhanced Deep&Cross Network architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
:param bridge_type: The type of bridge interaction, one of 'pointwise_addition', 'hadamard_product', 'concatenation', 'attention_pooling'
:param tau: Positive float, the temperature coefficient to control distribution of field-wise gating unit
:param use_dense_features: Whether to use dense features, if True, dense feature will be projected to sparse embedding space
    :param cross_num: positive integer, cross layer number
:param cross_parameterization: str, ``"vector"`` or ``"matrix"``, how to parameterize the cross network.
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_cross: float. L2 regularizer strength applied to cross net
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not DNN
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
if cross_num == 0:
raise ValueError("Cross layer num must > 0")
if bridge_type == 'pointwise_addition':
BridgeLayer = tf.keras.layers.Add
elif bridge_type == 'hadamard_product':
BridgeLayer = tf.keras.layers.Multiply
elif bridge_type == 'concatenation':
BridgeLayer = ConcatenationBridge
elif bridge_type == 'attention_pooling':
BridgeLayer = AttentionPoolingLayer
else:
raise NotImplementedError
    print('EDCN bridge type: ', bridge_type)
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
linear_logit = get_linear_logit(features,
linear_feature_columns,
seed=seed,
prefix='linear',
l2_reg=l2_reg_linear)
sparse_embedding_list, dense_value_list = input_from_feature_columns(
features, dnn_feature_columns, l2_reg_embedding, seed)
# project dense value to sparse embedding space, generate a new field feature
if use_dense_features:
sparse_embedding_dim = sparse_embedding_list[0].shape[-1]
dense_value_feild = concat_func(dense_value_list)
dense_value_feild = DNN([sparse_embedding_dim], dnn_activation,
l2_reg_dnn, dnn_dropout,
dnn_use_bn)(dense_value_feild)
dense_value_feild = tf.expand_dims(dense_value_feild, axis=1)
sparse_embedding_list.append(dense_value_feild)
deep_in = sparse_embedding_list
cross_in = sparse_embedding_list
field_size = len(sparse_embedding_list)
cross_dim = field_size * cross_in[0].shape[-1]
for i in range(cross_num):
deep_in = RegulationLayer(tau)(deep_in)
cross_in = RegulationLayer(tau)(cross_in)
cross_out = CrossNet(1, parameterization=cross_parameterization,
l2_reg=l2_reg_cross)(deep_in)
deep_out = DNN([cross_dim], dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed=seed)(cross_in)
bridge_out = BridgeLayer()([cross_out, deep_out])
bridge_out_list = tf.split(tf.expand_dims(bridge_out, axis=1), field_size, axis=-1)
deep_in = bridge_out_list
cross_in = bridge_out_list
stack_out = tf.keras.layers.Concatenate()(
[cross_out, deep_out, bridge_out])
final_logit = tf.keras.layers.Dense(1, use_bias=False,
kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)
final_logit = add_func([final_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model | 61e3f6868613111001420d88b8c9b99f91361653 | 5,197 |
def cart_to_polar(arr_c):
"""Return cartesian vectors in their polar representation.
Parameters
----------
arr_c: array, shape (a1, a2, ..., d)
Cartesian vectors, with last axis indexing the dimension.
Returns
-------
arr_p: array, shape of arr_c
Polar vectors, using (radius, inclination, azimuth) convention.
"""
if arr_c.shape[-1] == 1:
arr_p = arr_c.copy()
elif arr_c.shape[-1] == 2:
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)
arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
elif arr_c.shape[-1] == 3:
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)
arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0])
arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
else:
raise Exception('Invalid vector for polar representation')
return arr_p | c4c2256fcc9b01849dc4012ceac017273dcc4ddb | 5,198 |
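# cart_to_polar on a single 2-D vector, assuming the function above and its
# vector_mag helper are in scope.
import numpy as np

v = np.array([[1.0, 1.0]])
print(cart_to_polar(v))  # [[1.41421356 0.78539816]] -> (radius, azimuth)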
def createList(listSize):
"""
Creates list block that creates input instances for each element and an output instance for connecting to
the resulting list. List size is limited to 300 elements. Larger lists will be truncated.
:param listSize: The size of the list of point inputs that will be created
:return: A list of the input instances and the output of the list block
"""
listInst = psc.createInstance("ListBlock", "ListInstance")
inputInstances = [None] * listSize
psc.connect(psc.Constant((listSize, 0, 0)), listInst.size)
for i in range(listSize):
inputInstances[i] = getattr(listInst, "element" + str(i))
return {"inputs":inputInstances, "output":listInst.out} | 91b508674ad6f26e9e7dd43cb372fb0804db7ccd | 5,199 |