content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
import datetime
def index(request):
"""Magicaltastic front page.
Plugins can register a hook called 'frontpage_updates_<type>' to add
updates to the front page. `<type>` is an arbitrary string indicating
the sort of update the plugin knows how to handle; for example,
spline-forum has a `frontpage_updates_forum` hook for posting news from
a specific forum.
Hook handlers should return a list of FrontPageUpdate objects.
Standard hook parameters are:
`limit`, the maximum number of items that should ever be returned.
`max_age`, the number of seconds after which items expire.
`title`, a name for the source.
`icon`, an icon to show next to its name.
`limit` and `max_age` are also global options.
Updates are configured in the .ini like so:
spline-frontpage.sources.foo = updatetype
spline-frontpage.sources.foo.opt1 = val1
spline-frontpage.sources.foo.opt2 = val2
Note that the 'foo' name is completely arbitrary and is only used for
grouping options together. This will result in a call to:
run_hooks('frontpage_updates_updatetype', opt1=val1, opt2=val2)
Plugins may also respond to the `frontpage_extras` hook with other
interesting things to put on the front page. There's no way to
customize the order of these extras or which appear and which don't, at
the moment. Such hooks should return an object with at least a
`template` attribute; the template will be called with the object
passed in as its `obj` argument.
Local plugins can override the fairly simple index.mako template to
customize the front page layout.
"""
response = request.response
config = request.registry.settings
cache = request.environ.get('beaker.cache', None)
c = request.tmpl_context
updates = []
global_limit = config['spline-frontpage.limit']
global_max_age = max_age_to_datetime(
config['spline-frontpage.max_age'])
c.sources = config['spline-frontpage.sources']
for source in c.sources:
new_updates = source.poll(global_limit, global_max_age, cache)
updates.extend(new_updates)
# Little optimization: once there are global_limit items, anything
# older than the oldest cannot possibly make it onto the list. So,
# bump global_max_age to that oldest time if this is ever the case.
updates.sort(key=lambda obj: obj.time, reverse=True)
del updates[global_limit:]
if updates and len(updates) == global_limit:
global_max_age = updates[-1].time
# Find the oldest unseen item, to draw a divider after it.
# If this stays as None, the divider goes at the top
c.last_seen_item = None
# Could have a timestamp in a cookie
last_seen_time = None
try:
last_seen_time = datetime.datetime.fromtimestamp(
int(request.cookies['frontpage-last-seen-time']))
except (KeyError, ValueError):
pass
if last_seen_time:
for update in updates:
if update.time > last_seen_time:
c.last_seen_item = update
else:
break
# Save ~now~ as the last-seen time
now = datetime.datetime.now().strftime('%s')
response.set_cookie('frontpage-last-seen-time', now)
# Done! Feed to template
c.updates = updates
# Hook for non-update interesting things to put on the front page.
# This hook should return objects with a 'template' attribute, and
# whatever else they need
c.extras = []
return {} | 14e4200c2277e48792fd4d02f0126293a82a9ba8 | 16,200 |
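A hedged sketch (not part of spline-frontpage itself) of how the `spline-frontpage.sources.*` keys described in the docstring could be grouped into per-source option dicts before calling `run_hooks`; `group_frontpage_sources` and the `__type__` key are illustrative names only:
def group_frontpage_sources(settings):
    """Group spline-frontpage.sources.* keys into per-source option dicts."""
    prefix = 'spline-frontpage.sources.'
    sources = {}
    for key, value in settings.items():
        if not key.startswith(prefix):
            continue
        rest = key[len(prefix):]
        if '.' in rest:
            name, opt = rest.split('.', 1)
            sources.setdefault(name, {})[opt] = value
        else:
            # A bare key names the update type, e.g. 'foo' -> 'updatetype'.
            sources.setdefault(rest, {})['__type__'] = value
    return sources
# group_frontpage_sources({
#     'spline-frontpage.sources.foo': 'updatetype',
#     'spline-frontpage.sources.foo.opt1': 'val1',
#     'spline-frontpage.sources.foo.opt2': 'val2',
# })
# -> {'foo': {'__type__': 'updatetype', 'opt1': 'val1', 'opt2': 'val2'}}
# which corresponds to run_hooks('frontpage_updates_updatetype', opt1='val1', opt2='val2')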
import numpy as np
def fd_d1_o4_smoothend(var,grid,mat=False):
"""Centered finite difference, first derivative, 4th order using extrapolation to get boundary points
var: quantity to be differentiated.
grid: grid for var
mat: matrix for the finite-differencing operator. if mat=False then it is created"""
dx = grid[1]-grid[0]
grid0 = np.linspace(grid[0]-2*dx,grid[-1]+2*dx,len(grid)+4)
var0 = interp(grid,var,grid0)
    if mat is False:  # avoid truth-testing: 'not mat' is ambiguous when mat is an ndarray
mat=get_mat_fd_d1_o4(len(var0),grid0[1]-grid0[0])
dvar0=-np.dot(mat,var0)
dvar_out=dvar0[2:-2]
return -dvar_out | e1b57204e6fd9fe2839e4fb2e7230dd0f8854841 | 16,201 |
def find_node_pair_solutions(node_pairs, graph):
""" Return path and cost for all node pairs in the path sets. """
node_pair_solutions = {}
for node_pair in node_pairs:
if node_pair not in node_pair_solutions:
cost, path = dijkstra.find_cost(node_pair, graph)
node_pair_solutions[node_pair] = (cost, path)
# Also store the reverse pair
node_pair_solutions[node_pair[::-1]] = (cost, path[::-1])
return node_pair_solutions | f2f742cc1e969b4b60394148508cbb9cacaa3cfc | 16,202 |
import math
def get_step(a, b, marks=1):
"""Return a coordinate set between ``a`` and ``b``.
This function returns a coordinate point between the two provided
coordinates. It does this by determining the angle of the path
between the two points and getting the sine and cosine from that
angle. The returned coordinate will be ``marks`` away from ``a``.
    It is worth noting that if the distance between the two points,
    calculated by ``get_distance``, is less than or equal to the value of
    ``marks``, then a copy of ``b`` is returned.
Args:
a (list): A tuple is also acceptable. This list will have two
items, either ``int``s or ``float``s.
b (list): Exactly the same requirements as ``a``. It can (and
usually will be) a different coordinate.
marks (:obj:`int`, optional): One mark is the measurement
between two adjacent coordinates. To step over a greater
number of coordinates, increase the number of ``marks``.
Returns:
tuple: The returned tuple is a new coordinate set. The location
of the coordinates is determined by ``marks`` and angle
connecting ``a`` and ``b``.
"""
if get_distance(a, b) <= marks:
return b[:]
angle = math.atan2(
-(a[1] - b[1]),
-(a[0] - b[0]),
)
return (
(math.cos(angle) * marks) + a[0],
(math.sin(angle) * marks) + a[1],
) | e242823df263f1cee28409ef3f984f9b3066dad5 | 16,203 |
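A usage sketch for get_step; get_distance is assumed to be the usual Euclidean helper used alongside it, and is defined here only so the example runs:
def get_distance(a, b):
    # Euclidean distance between two 2D points (stand-in for the real helper).
    return math.hypot(b[0] - a[0], b[1] - a[1])
# Stepping one mark from (0, 0) toward (3, 4): the 3-4-5 triangle gives
# cos(angle) = 0.6 and sin(angle) = 0.8.
get_step((0, 0), (3, 4), marks=1)   # -> (0.6, 0.8), up to float rounding
get_step((0, 0), (3, 4), marks=10)  # distance 5 <= 10, so a copy of b: (3, 4)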
import torch
from torch import nn
def get_model_mask_neurons(model, layers):
"""
Defines a dictionary of type {layer: tensor} containing for each layer of a model, the binary mask representing
which neurons have a value of zero (all of its parameters are zero).
:param model: PyTorch model.
:param layers: Tuple of layers on which apply the threshold procedure. e.g. (nn.modules.Conv2d, nn.modules.Linear)
:return: Mask dictionary.
"""
mask = {}
for n_m, mo in model.named_modules():
if isinstance(mo, layers):
for n_p, p in mo.named_parameters():
name = "{}.{}".format(n_m, n_p)
if "weight" in n_p:
if isinstance(mo, nn.modules.Linear):
sum = torch.abs(p).sum(dim=1)
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
elif isinstance(mo, nn.modules.Conv2d):
sum = torch.abs(p).sum(dim=(1, 2, 3))
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
elif isinstance(mo, nn.modules.ConvTranspose2d):
sum = torch.abs(p).sum(dim=(0, 2, 3))
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
else:
mask[name] = torch.where(p == 0, torch.zeros_like(p), torch.ones_like(p))
else:
mask[name] = torch.where(p == 0, torch.zeros_like(p), torch.ones_like(p))
return mask | 2e24af14d05802bac69b65a225ce284b5a7785e7 | 16,204 |
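A usage sketch (relies on the torch/nn imports above): zero out one output neuron of a Linear layer and inspect the resulting mask:
model = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
with torch.no_grad():
    model[0].weight[1].zero_()   # zero the second output neuron of the first layer
mask = get_model_mask_neurons(model, (nn.Linear,))
# mask['0.weight'] -> tensor([1., 0., 1.])  one 0/1 flag per output neuron
# mask['0.bias']   -> element-wise 0/1 flags (bias is not a 'weight' parameter)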
import win32com.client
def connection():
"""Open a new connection or return the cached existing one"""
try:
existing_connection = GLOBAL_CACHE[CACHE_KEY_CONNECTION]
except KeyError:
new_connection = win32com.client.Dispatch(ADO_CONNECTION)
new_connection.Provider = CONNECTION_PROVIDER
new_connection.Open(CONNECTION_TARGET)
return GLOBAL_CACHE.setdefault(CACHE_KEY_CONNECTION, new_connection)
#
if not existing_connection.state:
# Reopen the connection if necessary
existing_connection.Open(CONNECTION_TARGET)
#
return existing_connection | f2c09fac89e0b0c9f9894869bb559ce61bca942a | 16,205 |
import subprocess
def getRigidAtoms():
"""Returns atoms in rigid bodies in PDB"""
atoms = []
fileName = pdbs[0]
subprocess.call(["kgs_prepare.py", fileName])
f = open(fileName[:-4] + ".kgs.pdb", "r")
lineList = f.readlines()
f.close()
if connectivity and not "CONECT" in lineList[-1]:
with open(connectivity) as fread:
with open(fileName[:-4] + ".kgs.pdb", "a") as fwrite:
for line in fread:
if "CONECT" in line:
fwrite.write(line)
subprocess.call(["kgs_rigidity", "--initial", fileName[:-4] + ".kgs.pdb", "--saveData", "2", "--workingDirectory", "./"])
with open(fileName[:-4] + ".kgs_RBs_1.txt") as f:
for line in f:
if not "NaN" in line and line != "\n":
atoms.append(line[:-1])
return atoms | 63a903a7a7a7f99c2d6bcd36329a07845dd327d5 | 16,206 |
def precision(theta,X,Y):
"""
accuracy function
computes the accuracy of the logistic model theta on X with true target variable Y
"""
m = np.shape(X)[0]
H = sigmoid(np.dot(X,theta))
H[H >= 0.5] = 1
H[H < 0.5] = 0
return np.sum(H == Y)/m | e3b2c1c613f5ae2f20b2b9a8e6e343348be845df | 16,207 |
def get_converter(obj, coords=None, dims=None, chains=None):
"""Get the converter to transform a supported object to an xarray dataset.
This function sends `obj` to the right conversion function. It is idempotent,
in that it will return xarray.Datasets unchanged.
Parameters
----------
obj : A dict, or an object from PyStan or PyMC3 to convert
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict[str, Tuple(str)]
A mapping from pymc3 variables to a tuple corresponding to
the shape of the variable, where the elements of the tuples are
the names of the coordinate dimensions.
chains : int or None
The number of chains sampled from the posterior, only necessary for
converting dicts.
Returns
-------
xarray.Dataset
The coordinates are those passed in and ('chain', 'draw')
"""
if isinstance(obj, dict):
return DictToXarray(obj, coords, dims, chains=chains)
elif obj.__class__.__name__ == 'StanFit4Model': # ugly, but doesn't make PyStan a requirement
return PyStanToXarray(obj, coords, dims)
elif obj.__class__.__name__ == 'MultiTrace': # ugly, but doesn't make PyMC3 a requirement
return PyMC3ToXarray(obj, coords, dims)
else:
raise TypeError('Can only convert PyStan or PyMC3 object to xarray, not {}'.format(
obj.__class__.__name__)) | ee293672d74de5f0e1de0ff25c806fa10327c71c | 16,208 |
import pytz
def timezone_by_tzvar(tzvar):
"""Convert a WWTS tzvar to a tzdata timezone"""
return pytz.timezone(city_by_tzvar(tzvar)) | 0bc4d634ca5fcc55ceed062ae06fbe2eefb6c11a | 16,209 |
import json
def group_recommend(request):
"""
Get or post file/directory discussions to a group.
"""
content_type = 'application/json; charset=utf-8'
result = {}
if request.method == 'POST':
form = GroupRecommendForm(request.POST)
if form.is_valid():
repo_id = form.cleaned_data['repo_id']
attach_type = form.cleaned_data['attach_type']
path = form.cleaned_data['path']
message = form.cleaned_data['message']
# groups is a group_id list, e.g. [u'1', u'7']
groups = request.POST.getlist('groups')
username = request.user.username
groups_not_in = []
groups_posted_to = []
for group_id in groups:
# Check group id format
try:
group_id = int(group_id)
except ValueError:
result['error'] = _(u'Error: wrong group id')
return HttpResponse(json.dumps(result), status=400,
content_type=content_type)
group = get_group(group_id)
if not group:
result['error'] = _(u'Error: the group does not exist.')
return HttpResponse(json.dumps(result), status=400,
content_type=content_type)
            # TODO: Check whether repo is in the group and I'm in the group
if not is_group_user(group_id, username):
groups_not_in.append(group.group_name)
continue
# save message to group
gm = GroupMessage(group_id=group_id, from_email=username,
message=message)
gm.save()
# send signal
grpmsg_added.send(sender=GroupMessage, group_id=group_id,
from_email=username, message=message)
# save attachment
ma = MessageAttachment(group_message=gm, repo_id=repo_id,
attach_type=attach_type, path=path,
src='recommend')
ma.save()
# save discussion
fd = FileDiscuss(group_message=gm, repo_id=repo_id, path=path)
fd.save()
group_url = reverse('group_discuss', args=[group_id])
groups_posted_to.append(u'<a href="%(url)s" target="_blank">%(name)s</a>' % \
{'url':group_url, 'name':group.group_name})
if len(groups_posted_to) > 0:
result['success'] = _(u'Successfully posted to %(groups)s.') % {'groups': ', '.join(groups_posted_to)}
if len(groups_not_in) > 0:
result['error'] = _(u'Error: you are not in group %s.') % (', '.join(groups_not_in))
else:
result['error'] = str(form.errors)
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
# request.method == 'GET'
else:
repo_id = request.GET.get('repo_id')
path = request.GET.get('path', None)
repo = get_repo(repo_id)
if not repo:
result['error'] = _(u'Error: the library does not exist.')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
if path is None:
result['error'] = _(u'Error: no path.')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
# get discussions & replies
path_hash = calc_file_path_hash(path)
discussions = FileDiscuss.objects.filter(path_hash=path_hash, repo_id=repo_id)
msg_ids = [ e.group_message_id for e in discussions ]
grp_msgs = GroupMessage.objects.filter(id__in=msg_ids).order_by('-timestamp')
msg_replies = MessageReply.objects.filter(reply_to__in=grp_msgs)
for msg in grp_msgs:
msg.replies = []
for reply in msg_replies:
if msg.id == reply.reply_to_id:
msg.replies.append(reply)
msg.reply_cnt = len(msg.replies)
msg.replies = msg.replies[-3:]
ctx = {}
ctx['messages'] = grp_msgs
html = render_to_string("group/discussion_list.html", ctx)
result['html'] = html
return HttpResponse(json.dumps(result), content_type=content_type) | 1821eba9f2144ac5e9b6b9a4c5c8d935b3dd5e08 | 16,210 |
def easy_map(parser, token):
"""
The syntax:
{% easy_map <address> [<width> <height>] [<zoom>] [using <template_name>] %}
The "address" parameter can be an Address instance or a string describing it.
If an address is not found a new entry is created in the database.
"""
width, height, zoom, template_name = None, None, None, None
params = token.split_contents()
# pop the template name
if params[-2] == 'using':
template_name = params[-1]
params = params[:-2]
if len(params) < 2:
raise template.TemplateSyntaxError('easy_map tag requires address argument')
address = params[1]
if len(params) == 4:
width, height = params[2], params[3]
elif len(params) == 5:
width, height, zoom = params[2], params[3], params[4]
elif len(params) == 3 or len(params) > 5:
raise template.TemplateSyntaxError('easy_map tag has the following syntax: '
'{% easy_map <address> <width> <height> [zoom] [using <template_name>] %}')
return EasyMapNode(address, width, height, zoom, template_name) | b2968f6ff3cde324711f84a5b449fbab92cc22fa | 16,211 |
import six
def pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields):
"""
Args:
        name_prefixs: A prefix string or a list of prefix strings.
origin_datas: Data list or a list of data lists.
paddings: A padding id or a list of padding ids.
input_fields: A list of input fields dict.
Returns: A dict for while loop.
"""
data = dict()
data["feed_dict"] = dict()
def map_fn(n, d, p):
# n: name prefix
# d: data list
# p: padding symbol
data[concat_name(n, Constants.IDS_NAME)] = d
n_samples = len(d)
n_devices = len(input_fields)
n_samples_per_gpu = n_samples // n_devices
if n_samples % n_devices > 0:
n_samples_per_gpu += 1
def _feed_batchs(_start_idx, _inpf):
if _start_idx * n_samples_per_gpu >= n_samples:
return 0
x, x_len = padding_batch_data(
d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p)
data["feed_dict"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x
data["feed_dict"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len
return len(x_len)
parallels = repeat_n_times(
n_devices, _feed_batchs,
list(range(n_devices)), input_fields)
data["feed_dict"]["parallels"] = parallels
if isinstance(name_prefixs, six.string_types):
map_fn(name_prefixs, origin_datas, paddings)
else:
[map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)]
return data | 2946a8869cac26737f6c5b6234ce0320cfdf5bcf | 16,212 |
from sqlalchemy.orm import sessionmaker
def get_session_maker():
"""
Return an sqlalchemy sessionmaker object using an engine from get_engine().
"""
return sessionmaker(bind=get_engine()) | 2f1a500cf799910f98e7821582cb78d063eeb273 | 16,213 |
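A hedged usage sketch: get_engine is not shown in the snippet, so a throwaway in-memory SQLite engine stands in for it here.
from sqlalchemy import create_engine, text
def get_engine(url="sqlite:///:memory:"):
    # Stand-in for the real engine factory assumed by get_session_maker().
    return create_engine(url)
Session = get_session_maker()
session = Session()
print(session.execute(text("SELECT 1")).scalar())   # -> 1
session.close()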
def rescale_intensity(arr, in_range, out_range):
""" Return arr after stretching or shrinking its intensity levels.
Parameters
----------
arr: array
input array.
in_range, out_range: 2-tuple
min and max intensity values of input and output arr.
Returns
-------
out: array
array after rescaling its intensity.
"""
imin, imax = in_range
omin, omax = out_range
out = np.clip(arr, imin, imax)
out = (out - imin) / float(imax - imin)
return out * (omax - omin) + omin | 580c789a6eb2ad03bcbdefd8e5f27b0c6a239f32 | 16,214 |
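A usage example, assuming numpy is imported as np: stretch the [50, 200] intensity window onto [0, 1].
arr = np.array([0., 50., 125., 200., 255.])
rescale_intensity(arr, in_range=(50, 200), out_range=(0, 1))
# -> array([0. , 0. , 0.5, 1. , 1. ])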
import traceback
import sys
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg | 539a26f0a6bd6b733aa6e6ff1325faac6a32be12 | 16,215 |
import requests
import xml.etree.ElementTree as ET
def call_oai_api(resumption_token):
"""
Request page of data from the Argitrop OAI API
Parameters
----------
resumption_token : object (first page) or string or xml.etree.ElementTree.Element
token returned by previous request.
Returns
-------
response_xml : string
Response text as XML string
resumption_token : xml.etree.ElementTree.Element
        token for requesting the next page
"""
oai_api_url = cfg.OAI_ENDPOINT_START % cfg.OAI_DATASET_NAME
if isinstance(resumption_token, ET.Element):
oai_api_url = cfg.OAI_ENDPOINT_CONTINUE % resumption_token.text
if isinstance(resumption_token, str):
oai_api_url = cfg.OAI_ENDPOINT_CONTINUE % resumption_token
headers = {'User-Agent': '%s' % cfg.USER_AGENT }
logger.info('Calling OAI API: %s', oai_api_url)
response = requests.get(oai_api_url, verify=True, headers=headers)
response_xml = ET.fromstring(response.text)
resumption_token = response_xml.find('oai:ListRecords', cfg.OAI_NS).find('oai:resumptionToken', cfg.OAI_NS)
return response_xml, resumption_token | e69ec11f75676a94134f4541b421391367ab1e3c | 16,216 |
import yaml
def save_pano_config(p):
"""
saves a panorama config file to the local disk from the session vars.
:return:
"""
filename = get_filename(p)
with open(filename, 'w') as yml_fh:
yml_fh.write(yaml.dump(session[p + '_config'], default_flow_style=False))
return redirect("/export") | 6a2575af4fe54caed7ce812d3fc2a876424912f7 | 16,217 |
import os
def append_source_filess(index_filename, source_files, driver):
"""This appends the paths to different source files to the temporary index file
For example
SRCSRV: source files ---------------------------------------
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\pdo_dbh.cpp*pdo_sqlsrv/pdo_dbh.cpp
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\pdo_init.cpp*pdo_sqlsrv/pdo_init.cpp
... ...
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\shared\core_stream.cpp*shared/core_stream.cpp
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\shared\core_util.cpp*shared/core_util.cpp
SRCSRV: end ------------------------------------------------
"""
failed = False
with open(index_filename, 'a') as idx_file:
idx_file.write('SRCSRV: source files ---------------------------------------' + os.linesep)
with open(source_files, 'r') as src_file:
for line in src_file:
pos = line.find('shared')
if (pos > 0): # it's a nested folder, so it must be positive
relative_path = line[pos:]
src_line = line[:-1] + '*' + relative_path.replace('\\', '/')
else: # not a file in the shared folder
pos = line.find(driver)
if (pos <= 0):
print('ERROR: Expected to find', driver, 'in', line)
failed = True
break
else:
relative_path = line[pos:]
src_line = line[:-1] + '*' + relative_path.replace('\\', '/')
idx_file.write(src_line)
idx_file.write('SRCSRV: end ------------------------------------------------' + os.linesep)
return failed | e26252740c2c64b581d4be56e25086216bc36e1b | 16,218 |
def is_transport(name):
"""Test if all parts of a name are transport coefficients
For example, efe_GB, chie_GB_div_efi_GB are all composed of transport
coefficients, but gam_GB and chiee_GB_plus_gam_GB are not.
"""
transport = True
try:
for part_name in extract_part_names(split_parts(name)):
transport &= split_name(part_name)[0] in heat_vars + particle_vars + momentum_vars
except ValueError:
transport = False
return transport | 1aea3915680b3c74422cbd7648fd920719dd3cc8 | 16,219 |
def detect_moved_files(file_manifest, diff):
""" Detect files that have been moved """
previous_hashes = defaultdict(set)
for item in file_manifest['files']: previous_hashes[item['hash']].add(item['path'])
diff_dict = make_dict(diff)
# files with duplicate hashes are assumed to have the same contents
moved_files = {}
not_found = []
for val in diff:
if val['status'] == 'new' and val['hash'] in previous_hashes:
found = None; prev_filtered = []
for itm in previous_hashes[val['hash']]:
if itm.split('/')[-1] == val['path'].split('/')[-1]: found = itm; break
if found != None and found in diff_dict and diff_dict[found]['status'] == 'delete':
previous_hashes[val['hash']].remove(found)
moved_files[val['path']] = {'from' : found, 'to' : val['path']}
else: not_found.append(val)
# At this point all duplicate items which have been moved but which retain the original name
    # have been removed from their relevant set. Remaining items are assigned on an ad-hoc basis.
    # As their hashes are the same, their contents are assumed to be the same, so mis-assignments
# are not very important.
for val in not_found:
itm = previous_hashes[val['hash']].pop()
if itm in diff_dict and diff_dict[itm]['status'] == 'delete':
moved_files[val['path']] = {'from' : itm, 'to' : val['path']}
# Replace separate 'new' and 'delete' with a single 'moved' command.
for key, value in moved_files.iteritems():
moved_from = diff_dict.pop(value['from']) # remove the delete from the diff
moved_to = diff_dict[value['to']]
diff_dict[value['to']] = moved_from # start with where the file was moved from
diff_dict[value['to']]['status'] = 'moved'
diff_dict[value['to']]['moved_from'] = value['from']
diff_dict[value['to']]['path'] = moved_to['path'] # Copy the moved path
diff_dict[value['to']]['created'] = moved_to['created'] # Copy 'created' from the moved file
diff_dict[value['to']]['last_mod'] = moved_to['last_mod'] # Copy last_mod from the moved file
return [change for p, change in diff_dict.iteritems()] | db97dfb88d4fa253351e149dacf68a9fa3043072 | 16,220 |
def decodecaps(blob):
"""decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
Capabilities may have values using a line of the form::
capability=value1,value2,value3
The values are always a list."""
caps = {}
for line in blob.splitlines():
if not line:
continue
if b'=' not in line:
key, vals = line, ()
else:
key, vals = line.split(b'=', 1)
vals = vals.split(b',')
key = urlreq.unquote(key)
vals = [urlreq.unquote(v) for v in vals]
caps[key] = vals
return caps | 3c18bbe6b4b6a0562719d4992d6937d60f6bc114 | 16,221 |
from typing import Tuple
import os
def create_feature_columns() -> Tuple[list, list, list, list, list]:
"""
Returns:
        dense_feature_columns (list): feature_columns for continuous (dense) features
        category_feature_columns (list): feature_columns for categorical features
        target_feedid_feature_columns (list): feature_columns for the target feed
        sequence_feature_columns (list): feature_columns for the historical behavior sequence
        label_feature_columns (list): feature_columns for the label (target variable)
"""
category_feature_columns, dense_feature_columns = [], []
target_feedid_feature_columns, sequence_feature_columns = [], []
label_feature_columns = []
    # Continuous (dense) features
videoplayseconds = fc.numeric_column('videoplayseconds', default_value=0.0)
u_read_comment_7d_sum = fc.numeric_column('u_read_comment_7d_sum', default_value=0.0)
u_like_7d_sum = fc.numeric_column('u_like_7d_sum', default_value=0.0)
u_click_avatar_7d_sum = fc.numeric_column('u_click_avatar_7d_sum', default_value=0.0)
u_forward_7d_sum = fc.numeric_column('u_forward_7d_sum', default_value=0.0)
u_comment_7d_sum = fc.numeric_column('u_comment_7d_sum', default_value=0.0)
u_follow_7d_sum = fc.numeric_column('u_follow_7d_sum', default_value=0.0)
u_favorite_7d_sum = fc.numeric_column('u_favorite_7d_sum', default_value=0.0)
i_read_comment_7d_sum = fc.numeric_column('i_read_comment_7d_sum', default_value=0.0)
i_like_7d_sum = fc.numeric_column('i_like_7d_sum', default_value=0.0)
i_click_avatar_7d_sum = fc.numeric_column('i_click_avatar_7d_sum', default_value=0.0)
i_forward_7d_sum = fc.numeric_column('i_forward_7d_sum', default_value=0.0)
i_comment_7d_sum = fc.numeric_column('i_comment_7d_sum', default_value=0.0)
i_follow_7d_sum = fc.numeric_column('i_follow_7d_sum', default_value=0.0)
i_favorite_7d_sum = fc.numeric_column('i_favorite_7d_sum', default_value=0.0)
c_user_author_read_comment_7d_sum = fc.numeric_column('c_user_author_read_comment_7d_sum', default_value=0.0)
dense_feature_columns += [videoplayseconds, u_read_comment_7d_sum, u_like_7d_sum, u_click_avatar_7d_sum,
u_forward_7d_sum, u_comment_7d_sum, u_follow_7d_sum, u_favorite_7d_sum,
i_read_comment_7d_sum, i_like_7d_sum, i_click_avatar_7d_sum, i_forward_7d_sum,
i_comment_7d_sum, i_follow_7d_sum, i_favorite_7d_sum,
c_user_author_read_comment_7d_sum]
    # Categorical features
userid = fc.categorical_column_with_vocabulary_file('userid', os.path.join(FLAGS.vocabulary_dir, 'userid.txt'))
feedid = fc.sequence_categorical_column_with_vocabulary_file('feedid',
os.path.join(FLAGS.vocabulary_dir, 'feedid.txt'))
device = fc.categorical_column_with_vocabulary_file('device', os.path.join(FLAGS.vocabulary_dir, 'device.txt'))
authorid = fc.categorical_column_with_vocabulary_file('authorid',
os.path.join(FLAGS.vocabulary_dir, 'authorid.txt'))
bgm_song_id = fc.categorical_column_with_vocabulary_file('bgm_song_id',
os.path.join(FLAGS.vocabulary_dir, 'bgm_song_id.txt'))
bgm_singer_id = fc.categorical_column_with_vocabulary_file('bgm_singer_id',
os.path.join(FLAGS.vocabulary_dir, 'bgm_singer_id.txt'))
manual_tag_list = fc.categorical_column_with_vocabulary_file('manual_tag_list', os.path.join(FLAGS.vocabulary_dir,
'manual_tag_id.txt'))
his_read_comment_7d_seq = fc.sequence_categorical_column_with_vocabulary_file('his_read_comment_7d_seq',
os.path.join(FLAGS.vocabulary_dir,
'feedid.txt'))
userid_emb = fc.embedding_column(userid, 16)
feedid_emb = fc.shared_embedding_columns([feedid, his_read_comment_7d_seq], 16, combiner='mean')
device_emb = fc.embedding_column(device, 2)
authorid_emb = fc.embedding_column(authorid, 4)
bgm_song_id_emb = fc.embedding_column(bgm_song_id, 4)
bgm_singer_id_emb = fc.embedding_column(bgm_singer_id, 4)
manual_tag_id_emb = fc.embedding_column(manual_tag_list, 4, combiner='mean')
category_feature_columns += [userid_emb, device_emb, authorid_emb, bgm_song_id_emb, bgm_singer_id_emb,
manual_tag_id_emb]
target_feedid_feature_columns += [feedid_emb[0]]
sequence_feature_columns += [feedid_emb[1]]
# label
read_comment = fc.numeric_column("read_comment", default_value=0.0)
label_feature_columns += [read_comment]
return dense_feature_columns, category_feature_columns, target_feedid_feature_columns, sequence_feature_columns, label_feature_columns | 292497e6228e858e98b69350fb6507a0346efd16 | 16,222 |
def an(pos=5):
"""
Alineamiento del texto.
@pos:
1: Abajo izquierda
2: Abajo centro
3: Abajo derecha
4: Mitad derecha
5: Mitad centro
6: Mitad derecha
7: Arriba izquierda
8: Arriba centro
9: Arriba derecha
"""
apos = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
if pos not in apos:
raise ValueError('\n\nan(pos):\n<pos> solo acepta los '
'sigientes valores: ' + str(apos))
else:
return '\\an{:d}'.format(pos) | fbe1e89282ebdf7b4977bee295e2cac7735bd652 | 16,223 |
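Usage example: the returned string is the ASS/SSA alignment override tag.
an(2)   # -> '\\an2' (bottom center)
an(7)   # -> '\\an7' (top left)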
def arista_output(accesslist):
"""Helper function to generate accesslist ouput appropriate for an
Arista switch/router. This will eventually get rolled up into
a output module or class."""
# I have studied the sacred texts from the merciless Trigger Dojo
# TODO: this should be refactored, it's a bit of a mess and
# doesn't take ICMP in to account.
output = ['ip access-list {}'.format(accesslist.name)]
for entry in accesslist:
for protocol in entry.condition['protocol']:
for srcip in entry.condition['srcip']:
for srcport in _check(entry.condition['srcport']):
for dstip in entry.condition['dstip']:
for dstport in _check(entry.condition['dstport']):
output.append(_build_output(entry.index,
entry.action,
protocol,
srcip, srcport,
dstip, dstport))
return output | 7bebd68d76aa51c5965f5850731c47b220871a0b | 16,224 |
def main(iterator):
"""
Given a line iterator of the bash file, returns a dictionary of
keys to values
"""
values = {}
for line in iterator:
if not line.startswith('#') and len(line.strip()) > 0:
match_obj = line_regex.search(line)
if match_obj is not None:
key, value = match_obj.group(1), match_obj.group(2)
values[key] = try_parse(value)
return values | 16cc188b367200c317119348d9440d57faa322a9 | 16,225 |
def is_any(typeref: irast.TypeRef) -> bool:
"""Return True if *typeref* describes the ``anytype`` generic type."""
return isinstance(typeref, irast.AnyTypeRef) | 75ca055529fea35dfeb2519c3de61bf3739ce1f7 | 16,226 |
from typing import Union
def repeat_1d(inputs: tf.Tensor, count: Union[tf.Tensor, int], name="repeat_1d"):
"""Repeats each element of `inputs` `count` times in a row.
'''python
repeat_1d(tf.range(4), 2) -> 0, 0, 1, 1, 2, 2, 3, 3
'''
Parameters:
inputs: A 1D tensor with shape [`size`] to be repeated.
count: An integer, used to specify the number of time elements of `inputs` are repeated.
name: An optional string to specify the `name_scope` of this operation.
Returns:
A 1D tensor with shape [`size` * `count`] and same type as `inputs`.
"""
with tf.name_scope(name):
outputs = tf.expand_dims(inputs, 1)
outputs = tf.tile(outputs, [1, count])
outputs = tf.reshape(outputs, [-1])
return outputs | 44a8bb29dcd2ba0e2e5970aff1eab94b85a34c13 | 16,227 |
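A usage example (TensorFlow 2.x eager mode), matching the docstring:
repeat_1d(tf.range(4), 2)
# -> tf.Tensor([0 0 1 1 2 2 3 3], shape=(8,), dtype=int32)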
import logging
def create_logger(logfile=r"/tmp/tomoproc.log"):
"""Default logger for exception tracking"""
logger = logging.getLogger("tomoproc_logger")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(logfile)
fh.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
)
# add handler to logger object
logger.addHandler(fh)
return logger | a0c005c39af9d24d7198790cf0cfe31a1b6395a0 | 16,228 |
async def async_setup(opp: OpenPeerPower, config: ConfigType) -> bool:
"""Set up the Twente Milieu components."""
async def update(call) -> None:
"""Service call to manually update the data."""
unique_id = call.data.get(CONF_ID)
await _update_twentemilieu(opp, unique_id)
opp.services.async_register(DOMAIN, SERVICE_UPDATE, update, schema=SERVICE_SCHEMA)
return True | f2c0dd14e9193b9fa3ae3ea87689e90d9eb2c1bc | 16,229 |
import re
import pandas as pd
def parsePDCfile(fpath='data/CPTAC2_Breast_Prospective_Collection_BI_Proteome.tmt10.tsv'):
"""
Takes a PDC file ending in .tmt10.tsv or .itraq.tsv and creates
tidied data frame with Gene, Patient, logratio and diffFromMean values
Parameters
----------
fpath : chr, optional
DESCRIPTION. The default is 'data/CPTAC2_Breast_Prospective_Collection_BI_Proteome.tmt10.tsv'.
    Returns
    -------
    pandas.DataFrame
        Tidied frame with Gene, NCBIGeneID, Patient and logratio columns.
"""
dat = pd.read_csv(fpath, sep='\t')
newdat = dat[['Gene', 'NCBIGeneID']]
#retrieve log ratios
pat = re.compile('.*[0-9]+\ Log Ratio')
pats = list(filter(pat.match, dat.keys()))
for pat in pats:
up_pat = pat.replace(' Log Ratio', '')
newdat[up_pat] = dat[pat]
#now tidy data by log ratio by patient
tdat = pd.melt(newdat, id_vars=['Gene', 'NCBIGeneID'],\
var_name='Patient', value_name='logratio')
return tdat | 48b421d965e9b7f337a1f58c3665643eba514a7c | 16,230 |
def ave(x):
"""
Returns the average value of a list.
:param x: a given list
:return: the average of param x
"""
return np.mean(x) | ad7737321d9f0fc8461129b0153f40da2d75dc70 | 16,231 |
def information_gain(f1, f2):
"""
This function calculates the information gain, where ig(f1,f2) = H(f1) - H(f1|f2)
Input
-----
f1: {numpy array}, shape (n_samples,)
f2: {numpy array}, shape (n_samples,)
Output
------
ig: {float}
"""
ig = entropyd(f1) - conditional_entropy(f1, f2)
return ig | 39c60bf6a9fbf18f4d5ba3af609fed53771bd817 | 16,232 |
import os
def output_if_exists(filename):
"""Returns file name if the file exists
Parameters
----------
filename : str
File in question.
Returns
-------
str
Filename.
"""
if os.path.exists(filename):
return filename
return None | 7589e8c5f2a42013cb391cf2d10ad3a9f9cb46ed | 16,233 |
def subsequent_chunk_mask(
size: int,
chunk_size: int,
num_left_chunks: int=-1, ) -> paddle.Tensor:
"""Create mask for subsequent steps (size, size) with chunk size,
this is for streaming encoder
Args:
size (int): size of mask
chunk_size (int): size of chunk
num_left_chunks (int): number of left chunks
<0: use full chunk
>=0: use num_left_chunks
Returns:
paddle.Tensor: mask, [size, size]
Examples:
>>> subsequent_chunk_mask(4, 2)
[[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1]]
"""
ret = paddle.zeros([size, size], dtype=paddle.bool)
for i in range(size):
if num_left_chunks < 0:
start = 0
else:
start = max(0, (i // chunk_size - num_left_chunks) * chunk_size)
ending = min(size, (i // chunk_size + 1) * chunk_size)
ret[i, start:ending] = True
return ret | 512def08ef2fe35cdd80ba7eb92f30b73aef1782 | 16,234 |
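A usage example (assumes paddle is imported), reproducing the 4x4, chunk-size-2 case from the docstring:
subsequent_chunk_mask(4, 2)
# -> boolean Tensor of shape [4, 4]:
#    [[True,  True,  False, False],
#     [True,  True,  False, False],
#     [True,  True,  True,  True ],
#     [True,  True,  True,  True ]]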
def top_tags(request):
"""
Shows a list of the most-used Tags.
Context::
object_list
The list of Tags
Template::
cab/top_tags.html
"""
return render_to_response('cab/top_tags.html',
{ 'object_list': Snippet.objects.top_items('tag', 20) },
context_instance=RequestContext(request)) | 07cf792fb3bd0ed5a1185986fb3154cb645b2a75 | 16,235 |
def check_integer_sign(value):
"""
    :param value: integer (or other comparable number) to test
    :return: True if ``value`` is non-negative (>= 0), else False
"""
return value >= 0 | 0ab012b62bf7b12ecabea8d1a4538bb30e197e07 | 16,236 |
import torch
def masks_empty(sample, mask_names):
""" Tests whether a sample has any non-masked values """
return any(not torch.any(sample[name] != 0) for name in mask_names) | 4c13b123fe6f5a17c3cd2ee673c54de331af7b23 | 16,237 |
def quantize_factor(factor_data, quantiles=5, bins=None, by_group=False):
"""
Computes period wise factor quantiles.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for each period,
The factor quantile/bin that factor value belongs too, and (optionally) the group the
asset belongs to.
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Only one of 'quantiles' or 'bins' can be not-None
by_group : bool
If True, compute quantile buckets separately for each group.
Returns
-------
factor_quantile : pd.Series
Factor quantiles indexed by date and asset.
"""
def quantile_calc(x, _quantiles, _bins):
if _quantiles is not None:
return pd.qcut(x, _quantiles, labels=False) + 1
elif _bins is not None:
return pd.cut(x, _bins, labels=False) + 1
raise ValueError('quantiles or bins should be provided')
grouper = [factor_data.index.get_level_values('date')]
if by_group:
grouper.append('group')
factor_quantile = factor_data.groupby(grouper)['factor'].apply(quantile_calc, quantiles, bins)
factor_quantile.name = 'factor_quantile'
return factor_quantile.dropna() | 1b51b84e9f22a1b0e0c2bb578a2011c1b8f725e2 | 16,238 |
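A small usage sketch, assuming pandas is imported as pd: two dates with four assets each, bucketed into two quantiles per date.
idx = pd.MultiIndex.from_product(
    [pd.to_datetime(['2020-01-01', '2020-01-02']), ['A', 'B', 'C', 'D']],
    names=['date', 'asset'])
factor_data = pd.DataFrame({'factor': [1.0, 2.0, 3.0, 4.0] * 2}, index=idx)
quantize_factor(factor_data, quantiles=2)
# -> per date: factor_quantile 1 for the two lowest factor values, 2 for the two highest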
import os
def load_all_dbs(database_dir):
"""Load and return a ShowDB and TrackerDB.
Returns:
showdb: ShowDatabse instance
tracker: TrackerDatabase instance
"""
showdb = load_database(os.path.join(database_dir, '.showdb.json'))
tracker = load_database(os.path.join(database_dir, '.tracker.json'))
return showdb, tracker | 65fa856c2561cf54a4cc10dbf380f3c2fc5b97d3 | 16,239 |
def listSplit(aList, n):
"""将一个列表以n个元素为一个单元进行均分,返回嵌套列表"""
return [aList[i:i+n] for i in range(0,len(aList),n)] | 936d4ff5b3bbbc39c57c01dc6a12e42b7dc6e0de | 16,240 |
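Usage example: chunks of two, with the remainder kept in the last chunk.
listSplit([1, 2, 3, 4, 5], 2)   # -> [[1, 2], [3, 4], [5]]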
import json
def refs(request):
""" Настройка назначения анализов вместе """
if request.method == "GET":
rows = []
fraction = directory.Fractions.objects.get(pk=int(request.GET["pk"]))
for r in directory.References.objects.filter(fraction=fraction).order_by("pk"):
rows.append(
{
'pk': r.pk,
'title': r.title,
'about': r.about,
'ref_m': json.loads(r.ref_m) if isinstance(r.ref_m, str) else r.ref_m,
'ref_f': json.loads(r.ref_f) if isinstance(r.ref_f, str) else r.ref_f,
'del': False,
'hide': False,
'isdefault': r.pk == fraction.default_ref_id,
}
)
return JsonResponse(rows, safe=False)
elif request.method == "POST":
pk = int(request.POST["pk"])
default = int(request.POST["default"])
if pk > -1:
fraction = directory.Fractions.objects.get(pk=pk)
for r in json.loads(request.POST["refs"]):
r["ref_m"].pop("", None)
r["ref_f"].pop("", None)
if r["del"] and r["pk"] != -1:
directory.References.objects.filter(pk=r["pk"]).delete()
if r["pk"] == default:
default = -1
elif not r["del"] and r["pk"] == -1:
nrf = directory.References(title=r["title"], about=r["about"], ref_m=r["ref_m"], ref_f=r["ref_f"], fraction=fraction)
nrf.save()
if r["isdefault"]:
default = nrf.pk
else:
row = directory.References.objects.get(pk=r["pk"])
row.title = r["title"]
row.about = r["about"]
row.ref_m = json.dumps(r["ref_m"])
row.ref_f = json.dumps(r["ref_f"])
row.save()
fraction.default_ref = None if default == -1 else directory.References.objects.get(pk=default)
fraction.save()
return JsonResponse({"ok": True}) | 7635525efbdab8e22c21019f8de9cc74a83c9c2a | 16,241 |
def transform(data):
"""replace the data value in the sheet if it is zero
:param data: data set
:return: data set without zero
"""
data_transformed = data.applymap(zero2minimum)
return data_transformed | b717c6f42c8f0ae0c68c97647a33e77aec2f1508 | 16,242 |
def findChildren(node, name):
"""Returns all the children of input node, with a matching name.
Arguments:
node (dagNode): The input node to search
name (str): The name to search
Returns:
dagNode list: The children dagNodes
"""
return __findChildren(node, name, False) | 9258dac1261e24d3cc5e58030147ce693fbd0356 | 16,243 |
def get_process_rss(force_update=False, pid=None):
"""
<Purpose>
Returns the Resident Set Size of a process. By default, this will
return the information cached by the last call to _get_proc_info_by_pid.
This call is used in get_process_cpu_time.
<Arguments>
force_update:
Allows the caller to force a data update, instead of using the cached data.
pid:
If force_update is True, this parameter must be specified to force the update.
<Exceptions>
See _get_proc_info_by_pid.
<Returns>
The RSS of the process in bytes.
"""
global last_proc_info_struct
# Check if an update is being forced
if force_update and pid != None:
# Update the info
_get_proc_info_by_pid(pid)
# Get RSS
rss_pages = last_proc_info_struct.ki_rssize
rss_bytes = rss_pages * PAGE_SIZE
return rss_bytes | 99c1c3fd35db4bb22c2c37aba48ddb7049ec26fa | 16,244 |
from typing import Dict
def doc_to_dict(doc) -> Dict:
"""Takes whatever the mongo doc is and turns into json serializable dict"""
ret = {k: stringify_mongovalues(v) for k, v in doc.items() if k != "_id"}
ret["_id"] = str(doc["_id"])
return ret | 9e3f72568cf25ac864c1add2989c8e1cb064661d | 16,245 |
def add_srv_2cluster(cluster_name, srvjson):
"""
    Add a service to the database.
    :param cluster_name: name of the target cluster
    :param srvjson: dict holding 'host_name' and 'service_name'
    :return: (response dict, HTTP status)
"""
status = ''
message = ''
resp = {"status": status, "message": message}
host_name = srvjson.get('host_name')
service_name = srvjson.get('service_name')
sfo_clu_node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
if not sfo_clu_node:
raise ValueError('Not Found Node Host %s' % host_name)
swift_op = SwiftServiceOperation(sfo_clu_node.node_inet_ip)
try:
content = swift_op.install_service(service_name)
    except Exception as error:
status = 501
message = str(error)
else:
status = 200
message = content
resp.update({"status": status, "message": message})
return resp, status | 2e3c6ec6a312016785affbc71c5c2f178a0ecd84 | 16,246 |
def _add_left_zeros(number, iteration_digits):
"""Add zeros to the left side of the experiment run number.
Zeros will be added according to missing spaces until iterations_digits are
reached.
"""
number = str(number)
return f'{"0" * (iteration_digits - len(number))}{number}' | e3f86a7e7f276ceff4eb662a3f5bc364b4d10ea3 | 16,247 |
def sharpdiff(y_true, y_pred):
"""
@param y_true: tensor of shape (batch_size, height, width, channels)
@param y_pred: tensor of shape (batch_size, height, width, channels)
@return: the sharpness difference as a scalar
"""
def log10(tensor):
numerator = tf.math.log(tensor);
denominator = tf.math.log(tf.constant(10, dtype = numerator.dtype));
return numerator / denominator;
shape = tf.shape(y_pred);
num_pixels = tf.cast(shape[1] * shape[2] * shape[3], tf.float32);
y_true_dy, y_true_dx = tf.image.image_gradients(y_true);
y_pred_dy, y_pred_dx = tf.image.image_gradients(y_pred);
pred_grad_sum = y_pred_dx + y_pred_dy;
true_grad_sum = y_true_dx + y_true_dy;
grad_diff = tf.abs(true_grad_sum - pred_grad_sum);
grad_diff_red = tf.reduce_sum(grad_diff, [1, 2, 3]);
batch_errors = 10 * log10(1 / ((1 / num_pixels) * grad_diff_red));
return tf.reduce_mean(batch_errors); | 0c08541fd5c551c5a2ca1afb598adfc627c06286 | 16,248 |
import logging
def admin_setfriend():
""" Set the friend state of a user """
uid = request.args.get("uid", "")
state = request.args.get("state", "1") # Default: set as friend
try:
state = bool(int(state))
except Exception:
return (
"<html><body><p>Invalid state string: '{0}'</p></body></html>"
.format(state)
)
u = User.load_if_exists(uid) if uid else None
if u is None:
return "<html><body><p>Unknown user id '{0}'</p></body></html>".format(uid)
was_friend = u.friend()
u.set_friend(state)
u.set_has_paid(state)
u.update()
logging.info("Friend state of user {0} manually set to {1}".format(uid, state))
return (
"<html><body><p>User '{0}': friend state was '{2}', set to '{1}'</p></body></html>"
.format(uid, state, was_friend)
) | f4b3a04b18735320968513666ad5901a68e5a492 | 16,249 |
def LF_CG_BICLUSTER_BINDS(c):
"""
    This label function uses the bicluster data from the
    "A global network of biomedical relationships" dataset
"""
sen_pos = c.get_parent().position
pubmed_id = c.get_parent().document.name
query = bicluster_dep_df.query("pubmed_id==@pubmed_id&sentence_num==@sen_pos")
if not(query.empty):
if query["B"].sum() > 0.0:
return 1
return 0 | 29aeb5af69257a9c762bccc45c09e68d0799174c | 16,250 |
import argparse
def setup_argparse(parser: argparse.ArgumentParser) -> None:
"""Setup argument parser for ``cubi-tk org-raw check``."""
return OrganizeCommand.setup_argparse(parser) | fefbb3fd16905f353f9d6f98c3631024ca3e4e78 | 16,251 |
from typing import List
import random
def single_point_crossover(parents: List[Chromosome], probability: float = 0.7) -> List[Chromosome]:
""" Make the crossover of two parents to generate two child.
The crossover has a probability to be made.
The crossover point is random.
:param parents: selected parents
:param probability: probability that the crossover is made
:return: offspring
"""
cut_point = random.randint(1, len(parents[1].genes) - 1)
if random.random() < probability:
first_child = Chromosome(parents[0].genes[:cut_point] + parents[1].genes[cut_point:])
second_child = Chromosome(parents[1].genes[:cut_point] + parents[0].genes[cut_point:])
else:
first_child = Chromosome(parents[0].genes.copy())
second_child = Chromosome(parents[1].genes.copy())
return [first_child, second_child] | 4e8dd96fc42a8a1a1feb7c1c3dad42892e060425 | 16,252 |
def get_node_count(network=None, base_url=DEFAULT_BASE_URL):
"""Reports the number of nodes in the network.
Args:
network (SUID or str or None): Name or SUID of a network or view. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
int: count of nodes in network.
Raises:
ValueError: if server response has no JSON
CyError: if network name or SUID doesn't exist
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> get_node_count()
6
>>> get_node_count(52)
6
>>> get_node_count('galFiltered.sif')
6
"""
net_suid = get_network_suid(network, base_url=base_url)
res = commands.cyrest_get(f'networks/{net_suid}/nodes/count', base_url=base_url)
return res['count'] | c80e34443c4e39a96496eca5867333800b0208c5 | 16,253 |
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Optional
import re
def process_sample(
sample: Dict[str, Any],
relation_vocab: Dict[str, int],
spacy_model: Any,
tokenizer: Any,
) -> Tuple[Optional[Dict[str, Any]], Dict[str, int]]:
"""Processes WebRED sample and updates relation vocabulary.
To process a raw WebRED example, we first extract subj and obj and remove the
annotations from the text. The resulting text is parsed with a spacy model to
find mention spans, and then tokenized with a BERT tokenizer. If necessary, we
override some spacy mentions with the subj and obj WebRED mentions.
Args:
sample: raw WebRED sample. Needs to contain following fields: token, list of
token strings. relation, string describing relation between subj and obj.
relation_vocab: dictionary mapping relation strings to integer labels.
spacy_model: spacy model used to detect mentions.
tokenizer: BERT tokenizer.
Returns:
Processed WebRED sample and updated relation vocabulary.
"""
processed_sample = {}
if sample['num_pos_raters'] < 2:
relation = NO_RELATION
else:
relation = sample['relation']
if relation not in relation_vocab:
relation_vocab[relation] = len(relation_vocab)
label = relation_vocab[relation]
processed_sample['target'] = [label]
text = sample['annotated_text']
# Remove subj and obj annotations from text and store position
def find_span(input_text: str, pattern: Any,
prefix_len: int) -> Tuple[int, int]:
"""Find span corresponding to actual subj or obj strings."""
match = pattern.search(input_text)
span_start = match.start() + prefix_len + 1
# We want inclusive spans, hence -2 instead of -1
span_end = match.end() - 2
return (span_start, span_end)
def replace_and_adjust(
input_text: str, match: Any, prefix_len: int,
inverted_mapping: np.ndarray) -> Tuple[str, np.ndarray]:
"""Remove subj/obj annotations and adjust token mapping accordingly."""
original_span_start = match.start() + prefix_len + 1
original_span_end = match.end() - 1
actual_string = input_text[original_span_start:original_span_end]
new_text = input_text[:match.start()] + actual_string + input_text[match
.end():]
# Inverted mapping maps from remaining tokens to positions in original text
new_inverted_mapping = np.zeros(len(new_text), dtype=np.int32)
new_inverted_mapping[:match.start()] = inverted_mapping[:match.start()]
new_span_start = match.start()
new_span_end = match.start() + len(actual_string)
new_inverted_mapping[new_span_start:new_span_end] = inverted_mapping[
original_span_start:original_span_end]
new_inverted_mapping[new_span_end:] = inverted_mapping[original_span_end +
1:]
return new_text, new_inverted_mapping
inverted_mapping = np.arange(len(text))
subj_pattern = re.compile('SUBJ{[^}]+}')
subj_span = find_span(text, subj_pattern, len('SUBJ'))
obj_pattern = re.compile('OBJ{[^}]+}')
obj_span = find_span(text, obj_pattern, len('OBJ'))
# Remove subj/obj annotations from text
while True:
subj_match = subj_pattern.search(text)
if subj_match is None:
break
text, inverted_mapping = replace_and_adjust(text, subj_match, len('SUBJ'),
inverted_mapping)
while True:
obj_match = obj_pattern.search(text)
if obj_match is None:
break
text, inverted_mapping = replace_and_adjust(text, obj_match, len('OBJ'),
inverted_mapping)
# Adjust spans for removed tokens
mapping = np.zeros(len(sample['annotated_text']), dtype=np.int32) - 1
mapping[inverted_mapping] = np.arange(len(inverted_mapping))
subj_span = (mapping[subj_span[0]], mapping[subj_span[1]])
assert subj_span[0] != -1 and subj_span[1] != -1
obj_span = (mapping[obj_span[0]], mapping[obj_span[1]])
assert obj_span[0] != -1 and obj_span[1] != -1
parsed_text = spacy_model(text)
# We use spacy to parse text, identify noun chunks
mention_char_spans = []
mention_char_spans.append(subj_span)
mention_char_spans.append(obj_span)
def overlaps(first_span: Tuple[int, int], second_span: Tuple[int,
int]) -> bool:
def point_inside_span(point: int, span: Tuple[int, int]) -> bool:
      # Inclusive containment check: start <= point <= end.
      return span[0] <= point <= span[1]
spans_overlap = (
point_inside_span(first_span[0], second_span) or
point_inside_span(first_span[1], second_span) or
point_inside_span(second_span[0], first_span) or
point_inside_span(second_span[1], first_span))
return spans_overlap
for chunk in parsed_text.noun_chunks:
span_start_char = parsed_text[chunk.start].idx
span_last_token = parsed_text[chunk.end - 1]
span_end_char = span_last_token.idx + len(span_last_token.text) - 1
char_span = (span_start_char, span_end_char)
# Append only if does not overlap with subj or obj spans. In case spacy
# mention annotation disagrees with tacred annotation, we want to favor
# tacred.
if not overlaps(char_span, subj_span) and not overlaps(char_span, obj_span):
mention_char_spans.append(char_span)
# Sort spans by start char
start_chars = np.array([span[0] for span in mention_char_spans])
sorted_indices = np.argsort(start_chars)
sorted_positions = np.zeros_like(start_chars)
sorted_positions[sorted_indices] = np.arange(len(sorted_positions))
sorted_spans = [mention_char_spans[idx] for idx in sorted_indices]
# Tokenize and get aligned mention positions
_, text_ids, text_mask, mention_spans, span_indices = tokenization_utils.tokenize_with_mention_spans(
tokenizer=tokenizer,
sentence=text,
spans=sorted_spans,
max_length=FLAGS.max_length,
add_bert_tokens=True,
allow_truncated_spans=True,
)
processed_sample['text_ids'] = text_ids
processed_sample['text_mask'] = text_mask
# Subj and obj are the first elements of mention spans.
subj_index = sorted_positions[0]
obj_index = sorted_positions[1]
# Some spans may be dropped by the BERT tokenizer. Here we map indices in the
# original list of spans to the one returned by the tokenizer.
reverse_span_indices = {
original_idx: tokenized_idx
for tokenized_idx, original_idx in enumerate(span_indices)
}
# Skip if subj or obj dropped.
if (subj_index not in reverse_span_indices or
obj_index not in reverse_span_indices):
return None, relation_vocab
subj_index = reverse_span_indices[subj_index]
obj_index = reverse_span_indices[obj_index]
# Make sure we don't discard subj or obj
assert max(subj_index, obj_index) < FLAGS.max_mentions
processed_sample['subject_mention_indices'] = [subj_index]
processed_sample['object_mention_indices'] = [obj_index]
mention_spans = np.array(mention_spans)
mention_start_positions = mention_spans[:, 0]
mention_end_positions = mention_spans[:, 1]
mention_start_positions = mention_start_positions[:FLAGS.max_mentions]
mention_end_positions = mention_end_positions[:FLAGS.max_mentions]
mention_pad_shape = (0, FLAGS.max_mentions - len(mention_start_positions))
mention_mask = np.ones(len(mention_start_positions), dtype=np.int64)
mention_mask = np.pad(mention_mask, mention_pad_shape, mode='constant')
mention_start_positions = np.pad(
mention_start_positions, mention_pad_shape, mode='constant')
mention_end_positions = np.pad(
mention_end_positions, mention_pad_shape, mode='constant')
processed_sample['mention_start_positions'] = mention_start_positions
processed_sample['mention_end_positions'] = mention_end_positions
processed_sample['mention_mask'] = mention_mask
return processed_sample, relation_vocab | 74a80fb69fdebb35c86830f54344fc770ad91cd4 | 16,254 |
import tempfile
import boto3
def transform_s3(key, bucket="songsbuckettest"):
"""
    REMEMBER TO DO DEFENSIVE PROGRAMMING, WRAP IN TRY/CATCH
"""
s3 = boto3.client('s3')
# print("connection to s3 -- Test")
with tempfile.NamedTemporaryFile(mode='wb') as tmp:
s3.download_fileobj(bucket, key, tmp)
try:
return process_h5_file(tmp.name)
except Exception as e:
return [] | 3e7419185ab3c3581ea24227c204fd207b113b1e | 16,255 |
def get_active_users(URM, popular_threshold=100):
"""
Get the users with activity above a certain threshold
:param URM: URM on which users will be extracted
    :param popular_threshold: popularity threshold
:return:
"""
return _get_popular(URM, popular_threshold, axis=1) | 6b05e1a4288e00903ce9b396407c4e3547402710 | 16,256 |
def default_data_to_device(
input, target=None, device: str = "cuda", non_blocking: bool = True
):
"""Sends data output from a PyTorch Dataloader to the device."""
input = input.to(device=device, non_blocking=non_blocking)
if target is not None:
target = target.to(device=device, non_blocking=non_blocking)
return input, target | 8dafddbd52b54a576ddc67d7d79af4372fbd57dc | 16,257 |
def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
column_width, tab_spaces, patch_filename=None):
"""Helper function that returns objects for diff2 views"""
ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue.key)
if ps_left is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_left_id, status=404)
ps_left.issue_key = request.issue.key
ps_right = models.PatchSet.get_by_id(
int(ps_right_id), parent=request.issue.key)
if ps_right is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_right_id, status=404)
ps_right.issue_key = request.issue.key
if patch_id is not None:
patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right.key)
else:
patch_right = None
if patch_right is not None:
patch_right.patchset_key = ps_right.key
if patch_filename is None:
patch_filename = patch_right.filename
# Now find the corresponding patch in ps_left
patch_left = models.Patch.query(
models.Patch.filename == patch_filename,
ancestor=ps_left.key).get()
if patch_left:
try:
new_content_left = patch_left.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_left = new_content_left.lines
elif patch_right:
lines_left = patch_right.get_content().lines
else:
lines_left = []
if patch_right:
try:
new_content_right = patch_right.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_right = new_content_right.lines
elif patch_left:
lines_right = patch_left.get_content().lines
else:
lines_right = []
rows = engine.RenderDiff2TableRows(request,
lines_left, patch_left,
lines_right, patch_right,
context=context,
colwidth=column_width,
tabspaces=tab_spaces)
rows = list(rows)
if rows and rows[-1] is None:
del rows[-1]
return dict(patch_left=patch_left, patch_right=patch_right,
ps_left=ps_left, ps_right=ps_right, rows=rows) | 47aef66544acec7d57125f3c7c0f8edb385ba150 | 16,258 |
def vec_add(iter_a, iter_b):
"""element wise addition"""
if len(iter_a) != len(iter_b):
raise ValueError
return (a + b for a, b in zip(iter_a, iter_b)) | f3e5bf50d61cfe518ee8b0eb838503a7f054baa8 | 16,259 |
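Usage example: the result is a generator, so materialize it with list() or tuple().
list(vec_add([1, 2, 3], [10, 20, 30]))   # -> [11, 22, 33]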
def run():
""" Read inputs into a dictionary for recursive searching """
for line in inputs:
# Strip the trailing "." and split
container, rest = line[:-1].split(" contain ")
# Strip the trailing " bags"
container = container[:-5]
contained = []
for bag in rest.split(", "):
if bag[:2] != "no":
# Strip the leading number and the trailing "bags" or " bag"
contained.append(bag[2:-4].strip())
bags[container] = contained
return sum(1 if search(bag) else 0 for bag in bags) | c3b565efbb923562c13955d808cf6ac2f09b616b | 16,260 |
def _get_prolongation_coordinates(grid, d1, d2):
"""Calculate required coordinates of finer grid for prolongation."""
D2, D1 = np.broadcast_arrays(
getattr(grid, 'vectorN'+d2), getattr(grid, 'vectorN'+d1)[:, None])
return np.r_[D1.ravel('F'), D2.ravel('F')].reshape(-1, 2, order='F') | 6534c456413cd062f9c35c14f5d9b57b1aba6c12 | 16,261 |
import os
from functools import cmp_to_key
def get_sorted_filediffs(filediffs, key=None):
"""Sorts a list of filediffs.
The list of filediffs will be sorted first by their base paths in
ascending order.
Within a base path, they'll be sorted by base name (minus the extension)
in ascending order.
If two files have the same base path and base name, we'll sort by the
extension in descending order. This will make :file:`*.h` sort ahead of
:file:`*.c`/:file:`*.cpp`, for example.
If the list being passed in is actually not a list of FileDiffs, it
must provide a callable ``key`` parameter that will return a FileDiff
for the given entry in the list. This will only be called once per
item.
"""
def cmp_filediffs(filediff1, filediff2):
x = make_key(filediff1)
y = make_key(filediff2)
# Sort based on basepath in ascending order.
if x[0] != y[0]:
a = x[0]
b = y[0]
else:
# Sort based on filename in ascending order, then based on
# the extension in descending order, to make *.h sort ahead of
# *.c/cpp.
x_file, x_ext = os.path.splitext(x[1])
y_file, y_ext = os.path.splitext(y[1])
if x_file == y_file:
a = y_ext
b = x_ext
else:
a = x_file
b = y_file
        # Python 3 has no builtin cmp(); this expression is its standard equivalent.
        return (a > b) - (a < b)
def make_key(filediff):
if key:
filediff = key(filediff)
filename = filediff.dest_file
i = filename.rfind('/')
if i == -1:
return '', filename
else:
return filename[:i], filename[i + 1:]
return sorted(filediffs, key=cmp_to_key(cmp_filediffs)) | dfd7fe9436bc59f5949dca838f82af01d14bfe83 | 16,262 |
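A quick sanity check of get_sorted_filediffs using lightweight stand-ins for FileDiff objects (the paths are made up for illustration):
from collections import namedtuple
FakeDiff = namedtuple('FakeDiff', 'dest_file')
diffs = [FakeDiff('src/util.c'), FakeDiff('src/util.h'), FakeDiff('README')]
print([d.dest_file for d in get_sorted_filediffs(diffs)])
# ['README', 'src/util.h', 'src/util.c'] -- same base name, so .h sorts ahead of .c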
def get_info(obj):
"""
get info from account obj
:type obj: account object
:param obj: the object of account
:return: dict of account info
"""
if obj:
return dict(db_instance_id=obj.dbinstance_id,
account_name=obj.account_name,
account_status=obj.account_status,
account_type=obj.account_type,
account_description=obj.account_description,
database_privileges=obj.database_privileges)
return {} | c654ab1bdb4b4bf20223172dae450e1e7e6a52b9 | 16,263 |
def vlookup(x0, vals, ind, approx=True):
"""
Equivalent to the spreadsheet VLOOKUP function
:param vals: array_like
2d array of values - first column is searched for index
    :param x0: value to look up in the first column of vals
    :param ind: index of the column whose value is returned for the matched key
    :param approx: if True, return the value for the nearest key not exceeding x0
        (the first column must be sorted); if False, require an exact match
    :return: the looked-up value
"""
if isinstance(vals[0][0], str):
x0 = str(x0)
if not approx: # need exact match
return vals[int(ind)][np.where(x0 == np.array(vals[0]))[0][0]]
else:
inds = np.searchsorted(vals[0], x0, side='right') - 1
return vals[ind][int(inds)] | 59ee6ecd7c001bf6cf3f03ad678d93eda33f5e21 | 16,264 |
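A small worked example for vlookup, treating vals as a list of columns and assuming numpy is imported as np (made-up data):
import numpy as np
vals = [[1, 5, 10], ['low', 'mid', 'high']]   # column 0: sorted keys, column 1: values
print(vlookup(5, vals, 1, approx=False))      # mid (exact match on the key 5)
print(vlookup(7, vals, 1))                    # mid (nearest key not exceeding 7)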
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
"""
    Compute 2x2 matrix multiplication in a vectorized way
C = A*B
C = [a00 a01] * [b00 b01] = [c00 c01]
[a10 a11] [b10 b11] [c10 c11]
"""
c00 = a00*b00 + a01*b10
c10 = a10*b00 + a11*b10
c01 = a00*b01 + a01*b11
c11 = a10*b01 + a11*b11
return c00, c10, c01, c11 | d34506cc8099cbbf8b7a9e1eb9d4d068d768ebac | 16,265 |
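Cross-checking matmul's scalar formulation against numpy's 2x2 matrix product (illustrative matrices):
import numpy as np
A = np.array([[1., 2.], [3., 4.]])
B = np.array([[5., 6.], [7., 8.]])
c00, c10, c01, c11 = matmul(A[0, 0], A[1, 0], A[0, 1], A[1, 1],
                            B[0, 0], B[1, 0], B[0, 1], B[1, 1])
print(np.allclose(np.array([[c00, c01], [c10, c11]]), A @ B))  # True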
import collections
import random
def random_sample_with_weight_and_cost(population, weights, costs, cost_limit):
"""
Like random_sample_with_weight but with the addition of a cost and limit.
While performing random samples (with priority for higher weight) we'll keep track of cost
If cost exceeds the cost limit, we stop selecting
Basically the knapsack problem, but with deliberately random selection rather than dynamic optimization
"""
population_weights = {request: weight for (request, weight) in zip(population, weights)}
population_costs = {request: cost for (request, cost) in zip(population, costs)}
selected = []
not_selected = []
cost = 0
# Create a Counter from the population, assigning count by weight
counter = collections.Counter(population_weights)
while counter:
# Turn the Counter into a list for random selection from
# The list will have n repetitions of an element with weight n
choice = random.choice(list(counter.elements()))
choice_cost = population_costs[choice]
# If the cost would cause us to exceed our limit it shouldn't be selected
if cost + choice_cost > cost_limit:
not_selected.append(choice)
else:
cost += choice_cost
selected.append(choice)
# When chosen (whether selected or not), remove the element from the population
# Effectively removes all repetitions of the element
counter.pop(choice)
return selected, not_selected | 637afd1c0e83bbda879f41bd15feb0f65b238fb3 | 16,266 |
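A seeded illustration of random_sample_with_weight_and_cost with made-up items: each item costs 2, so only two of the three fit under the limit of 4.
import random
random.seed(0)  # only to make the illustration repeatable
items = ['a', 'b', 'c']
weights = [5, 1, 1]   # 'a' is heavily favoured in the random draws
costs = [2, 2, 2]
selected, not_selected = random_sample_with_weight_and_cost(items, weights, costs, cost_limit=4)
print(len(selected), len(not_selected))  # 2 1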
def hardnet68ds(pretrained=False, **kwargs):
""" # This docstring shows up in hub.help()
Harmonic DenseNet 68ds (Depthwise Separable) model
pretrained (bool): kwargs, load pretrained weights into the model
"""
# Call the model, load pretrained weights
model = hardnet.HarDNet(depth_wise=True, arch=68, pretrained=pretrained)
return model | 5167b79f8effdb9a4b94e9d0a7902f35468a1d8b | 16,267 |
def get_config():
"""Base config for training models."""
config = ml_collections.ConfigDict()
# How often to save the model checkpoint.
config.save_checkpoints_steps: int = 1000
    # Frequency of eval during training, e.g. every 1000 steps.
config.eval_frequency: int = 1000
# Total batch size for training.
config.train_batch_size: int = 32
# Total batch size for eval.
config.eval_batch_size: int = 8
# The base learning rate for Adam.
config.learning_rate: float = 1e-4
# Initial checkpoint directory (usually from a pre-trained model).
config.init_checkpoint_dir: str = ''
# Whether to lower case the input text. Should be True for uncased models and
# False for cased models.
config.do_lower_case: bool = True
# Model parameters.
# For pre-training, we only need 2 segment types (for NSP), but we allow up to
# 4 for GLUE/SuperGLUE fine-tuning.
config.type_vocab_size: int = 4
# Embedding dimension for each token.
config.d_emb: int = 768
# Hidden dimension of model.
config.d_model: int = 768
# Hidden dimension for feed-forward layer.
config.d_ff: int = 3072
# The maximum total input sequence length after tokenization. Sequences longer
# than this will be truncated, and sequences shorter than this will be padded.
config.max_seq_length: int = 512
# Number of self-attention heads. Only used for BERT models.
config.num_heads: int = 12
# Number of model blocks / layers.
config.num_layers: int = 12
# Regular dropout rate, applied throughout model.
config.dropout_rate: float = 0.1
# Dropout rate used in mixing module, e.g. self-attention sublayer.
config.mixing_dropout_rate: float = 0.1
# Determines how discrete Fourier Transforms are computed. Only used for FNet
# models. Set to true if running on TPU hardware, in which case matrix
# multiplications will be favored for relatively shorter input sequences. Set
# to false for GPU/CPU hardware, in which case FFTs are used for all input
# sequence lengths.
config.use_tpu_fourier_optimizations: bool = False
# Dummy parameter for repeated runs.
config.trial: int = 0
return config | 67dfe8aff3f1a3e660d9debccc181690ea561ae2 | 16,268 |
def slave_addresses(dns):
"""List of slave IP addresses
@returns: str Comma delimited list of slave IP addresses
"""
return ', '.join(['{}:53'.format(s['address'])
for s in dns.pool_config]) | e293442272496f02a58055dd778ecfe875124ccd | 16,269 |
def processAndLabelStates(role, states, reason, positiveStates=None, negativeStates=None, positiveStateLabelDict={}, negativeStateLabelDict={}):
"""Processes the states for an object and returns the appropriate state labels for both positive and negative states.
@param role: The role of the object to process states for (e.g. C{ROLE_CHECKBOX}.
@type role: int
@param states: The raw states for an object to process.
@type states: set
@param reason: The reason to process the states (e.g. C{REASON_FOCUS}.
@type reason: str
@param positiveStates: Used for C{REASON_CHANGE}, specifies states changed from negative to positive;
@type positiveStates: set
@param negativeStates: Used for C{REASON_CHANGE}, specifies states changed from positive to negative;
    @type negativeStates: set
@param positiveStateLabelDict: Dictionary containing state identifiers as keys and associated positive labels as their values.
@type positiveStateLabelDict: dict
@param negativeStateLabelDict: Dictionary containing state identifiers as keys and associated negative labels as their values.
@type negativeStateLabelDict: dict
@return: The labels of the relevant positive and negative states.
@rtype: [str, ...]
"""
mergedStateLabels=[]
positiveStates = processPositiveStates(role, states, reason, positiveStates)
negativeStates = processNegativeStates(role, states, reason, negativeStates)
for state in sorted(positiveStates | negativeStates):
if state in positiveStates:
mergedStateLabels.append(positiveStateLabelDict.get(state, stateLabels[state]))
elif state in negativeStates:
# Translators: Indicates that a particular state of an object is negated.
# Separate strings have now been defined for commonly negated states (e.g. not selected and not checked),
# but this still might be used in some other cases.
# %s will be replaced with the full identifier of the negated state (e.g. selected).
mergedStateLabels.append(negativeStateLabelDict.get(state, negativeStateLabels.get(state, _("not %s") % stateLabels[state])))
return mergedStateLabels | 23be0c7d943961f756a02abea98c51500f92b00f | 16,270 |
def shape_for_stateful_rnn(data, batch_size, seq_length, seq_step):
"""
Reformat our data vector into input and target sequences to feed into our
RNN. Tricky with stateful RNNs.
"""
# Our target sequences are simply one timestep ahead of our input sequences.
# e.g. with an input vector "wherefore"...
# targets: h e r e f o r e
# predicts ^ ^ ^ ^ ^ ^ ^ ^
# inputs: w h e r e f o r
inputs = data[:-1]
targets = data[1:]
# We split our long vectors into semi-redundant seq_length sequences
inputs = _create_sequences(inputs, seq_length, seq_step)
targets = _create_sequences(targets, seq_length, seq_step)
# Make sure our sequences line up across batches for stateful RNNs
inputs = _batch_sort_for_stateful_rnn(inputs, batch_size)
targets = _batch_sort_for_stateful_rnn(targets, batch_size)
# Our target data needs an extra axis to work with the sparse categorical
# crossentropy loss function
targets = targets[:, :, np.newaxis]
return inputs, targets | 431eb54acc9bfe2281a3a863335eb135f050f47e | 16,271 |
import numpy as np
import pandas as pd
from time import time
from tqdm import tqdm
def setup_features(dataRaw, label='flux', notFeatures=[], pipeline=None, verbose=False, resample=False, returnAll=None):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
Args:
features (nD-array): Array of input raw features.
labels (1D-array): The second parameter.
pipeline (int): The first parameter.
label_scaler (str): The second parameter.
feature_scaler (str): The second parameter.
Returns:
features_transformed, labels_scaled
.. _PEP 484:
https://github.com/ExoWanderer/
"""
# if label in notFeatures: notFeatures.remove(label)
if isinstance(dataRaw,str):
        dataRaw = pd.read_csv(dataRaw)
elif isinstance(dataRaw, dict):
dataRaw = pd.DataFrame(dataRaw)
elif not isinstance(dataRaw, pd.DataFrame):
raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)')
# WHY IS THIS ALLOWED TO NOT HAVE PARENTHESES?
# assert isinstance(dataRaw, pd.DataFrame), 'The input must be a Pandas DataFrame or Dictionary with Equal Size Entries'
inputData = dataRaw.copy()
# PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
pixCols = [colname for colname in inputData.columns if 'pix' in colname.lower() or 'pld' in colname.lower()]
PLDnorm = np.sum(np.array(inputData[pixCols]),axis=1)
inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T
# # Overwrite the PLDpixels entries with the normalized version
# for key in dataRaw.columns:
# if key in PLDpixels.columns:
# inputData[key] = PLDpixels[key]
#
# Assign the labels
    n_PLD = len([key for key in dataRaw.keys() if 'err' not in key.lower() and ('pix' in key.lower() or 'pld' in key.lower())])
input_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' not in colname.lower()]
errors_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' in colname.lower()]
# resampling_inputs = ['flux', 'xpos', 'ypos', 'xfwhm', 'yfwhm', 'bg_flux', 'bmjd', 'np'] + ['pix{}'.format(k) for k in range(1,10)]
# resampling_errors = ['fluxerr', 'xerr', 'yerr', 'xerr', 'yerr', 'sigma_bg_flux', 'bmjd_err', 'np_err'] + ['fluxerr']*n_PLD
start = time()
if resample:
print("Resampling ", end=" ")
inputData = pd.DataFrame({colname:np.random.normal(dataRaw[colname], dataRaw[colerr]) \
for colname, colerr in tqdm(zip(input_labels, errors_labels), total=len(input_labels))
})
print("took {} seconds".format(time() - start))
else:
inputData = pd.DataFrame({colname:dataRaw[colname] for colname in input_labels})
labels = dataRaw[label].values
# explicitly remove the label
if label in inputData.columns: inputData.drop(label, axis=1, inplace=True)
feature_columns = [colname for colname in inputData.columns if colname not in notFeatures]
features = inputData[feature_columns].values
if verbose: print('Shape of Features Array is', features.shape)
if verbose: start = time()
# labels_scaled = labels# label_scaler.fit_transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features
if verbose: print('took {} seconds'.format(time() - start))
collection = features_trnsfrmd, labels
if returnAll == True:
collection = features_trnsfrmd, labels, pipeline
if returnAll == 'features':
collection = features_trnsfrmd
if returnAll == 'with raw data':
        collection = collection + (dataRaw,)  # collection is a tuple, so build a new one instead of appending
return collection | 7c1fb86dc66d97610bd1d22ef65ccb88e105dd92 | 16,272 |
from typing import List
from typing import Any
def plot_marginal_effects(model: ModelBridge, metric: str) -> AxPlotConfig:
"""
Calculates and plots the marginal effects -- the effect of changing one
factor away from the randomized distribution of the experiment and fixing it
at a particular level.
Args:
model: Model to use for estimating effects
metric: The metric for which to plot marginal effects.
Returns:
AxPlotConfig of the marginal effects
"""
plot_data, _, _ = get_plot_data(model, {}, {metric})
arm_dfs = []
for arm in plot_data.in_sample.values():
arm_df = pd.DataFrame(arm.parameters, index=[arm.name])
arm_df["mean"] = arm.y_hat[metric]
arm_df["sem"] = arm.se_hat[metric]
arm_dfs.append(arm_df)
    effect_table = marginal_effects(pd.concat(arm_dfs, axis=0))
varnames = effect_table["Name"].unique()
data: List[Any] = []
for varname in varnames:
var_df = effect_table[effect_table["Name"] == varname]
data += [
go.Bar(
x=var_df["Level"],
y=var_df["Beta"],
error_y={"type": "data", "array": var_df["SE"]},
name=varname,
)
]
fig = subplots.make_subplots(
cols=len(varnames),
rows=1,
subplot_titles=list(varnames),
print_grid=False,
shared_yaxes=True,
)
for idx, item in enumerate(data):
fig.append_trace(item, 1, idx + 1)
fig.layout.showlegend = False
# fig.layout.margin = go.layout.Margin(l=2, r=2)
fig.layout.title = "Marginal Effects by Factor"
fig.layout.yaxis = {
"title": "% better than experiment average",
"hoverformat": ".{}f".format(DECIMALS),
}
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC) | f68c72d54e4e8ff1011ae6daec8a00ab30069d78 | 16,273 |
def _client_row_class(client: dict) -> str:
"""
Set the row class depending on what's in the client record.
"""
required_cols = ['trust_balance', 'refresh_trigger']
for col in required_cols:
if col not in client:
return 'dark'
try:
if client['trust_balance'] > client['refresh_trigger']:
return 'success'
except TypeError:
return 'dark'
return 'danger' | cd5ebd8fd64c7d994d6803df473cd317af65e9ac | 16,274 |
def num2ord(place):
"""Return ordinal for the given place."""
omap = { u'1' : u'st',
u'2' : u'nd',
u'3' : u'rd',
u'11' : u'th',
u'12' : u'th',
u'13' : u'th' }
if place in omap:
return place + omap[place]
elif place.isdigit():
if len(place) > 1 and place[-1] in omap: # last digit 1,2,3
return place + omap[place[-1]]
else:
return place + u'th'
else:
return place | 3552257bba134ac00ed8c68d72bf5c947424b2e7 | 16,275 |
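num2ord for a few representative places, including the 11/12/13 special cases and a non-numeric input:
for p in [u'1', u'2', u'11', u'23', u'101', u'semifinal']:
    print(num2ord(p))
# 1st, 2nd, 11th, 23rd, 101st, semifinal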
from typing import Type
def _get_dist_class(
policy: Policy, config: AlgorithmConfigDict, action_space: gym.spaces.Space
) -> Type[TFActionDistribution]:
"""Helper function to return a dist class based on config and action space.
Args:
policy: The policy for which to return the action
dist class.
config: The Algorithm's config dict.
action_space (gym.spaces.Space): The action space used.
Returns:
Type[TFActionDistribution]: A TF distribution class.
"""
if hasattr(policy, "dist_class") and policy.dist_class is not None:
return policy.dist_class
elif config["model"].get("custom_action_dist"):
action_dist_class, _ = ModelCatalog.get_action_dist(
action_space, config["model"], framework="tf"
)
return action_dist_class
elif isinstance(action_space, Discrete):
return Categorical
elif isinstance(action_space, Simplex):
return Dirichlet
else:
assert isinstance(action_space, Box)
if config["normalize_actions"]:
return SquashedGaussian if not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian | 08c09b876d5c2797d517a87957049c34939aee3a | 16,276 |
def expectation_values(times, states, operator):
"""expectation values of operator at times wrt states"""
def exp_value(state, operator, time):
if len(state.shape) == 2: #DensityMatrix
return np.trace(np.dot(state, operator(time)))
else: #StateVector
return np.vdot(state, np.dot(operator(time), state))
evs = np.ndarray(times.shape, dtype=complex)
for i in range(times.shape[0]):
evs[i] = exp_value(states[i], operator, times[i])
return evs | 4c18fa3b2ad7bec01f8f833ade59fe90315724ec | 16,277 |
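A self-contained check of expectation_values using a time-independent Pauli-z operator (made-up states):
import numpy as np
sigma_z = np.array([[1.0, 0.0], [0.0, -1.0]])
operator = lambda t: sigma_z
times = np.array([0.0, 1.0])
states = [np.array([1.0, 0.0]),               # spin-up state vector
          np.array([1.0, 1.0]) / np.sqrt(2)]  # equal superposition
print(expectation_values(times, states, operator))  # [1.+0.j 0.+0.j]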
def compute_f_all(F1,fft_size,windowing,dtype_complex,F_frac=[],F_fs=[],F_refs=[],freq_channel=0,\
F_first_sample=[],F_rates=[],F_pcal_fix=[],F_side=[],F_ind=[],F_lti=[]):
"""
Compute FFTs for all stations (all-baselines-per-task mode), and correct for fractional sample correction (linear phase).
Parameters
----------
F1
list of stored samples (corresponding actually to F1_partial). Each element of the list is a numpy array
with the complex samples in the time domain, with a number of samples that is a multiply of the FFT length.
fft_size : int
number of coefficients in the FFT.
windowing : str
shape of the window before FFT, currently 'square' by default.
dtype_complex: type of data for initialization of the rotators.
F_frac
        fractional and integer offsets applied at the mapper (access via F_refs).
F_fs
sampling frequency for each stream in F1.
F_refs
        indices to access F_frac etc. based on F_ind, i.e. from stored to new.
freq_channel
sky frequency.
F_first_sample
        first sample number (actually the last sample number plus one); it has to be corrected by subtracting the number of samples in F1.
F_rates
delay information for each of the streams (access via F_refs).
F_pcal_fix
offset for pcal accumulation results (due to the initial offset applied in the mapper). Later the pcal
signals will be realigned as if no delay was applied to them.
F_side
list of single side band side for each stream, 'l' LSB or 'u' USB (access via F_refs).
F_ind
list of station-polarization identifiers corresponding to the streams in F1 (this actually corresponds
        to F1_ind_partial).
Returns
-------
F1_fft
list of array of arrays with FFTs with rotations applied.
None
        [unused] previously outputting the conjugate of F1_fft; removed for efficiency.
    F_adj_shift_partial_out
        [unused] previously used to keep track of the number of samples to
        add/drop due to fractional sample overflows, superseded by F_frac_over.
    F_adj_shift_pcal_out
        [unused] previously used to keep track of the number of samples to
        roll the phase calibration results prior to FFTing them, superseded by F_pcal_fix_out.
    F_pcal_fix_out
        list with the number of samples to roll the pcal streams prior to FFTing them.
F_first_sample_out
first sample for each stream (actually last sample number plus one).
Notes
-----
|
| **Procedure:**
|
| For each element in F1:
| 1. Create an array of arrays with the FFTs of the samples grouped into arrays of fft_size samples.
| 2. Create a frequency scale of fft_size (linear from 0 to (n-1)/n).
| 3a. If the computations have already been done for the same station, take the results.
| 3b. Otherwise:
| Compute delay for the first sample, then fractional part of this delay, then scale frequency scale, then exponential.
| Rotate the FFT using the previous rotator.
|
|
| **References:**
|
| [Th04] p363
|
|
| **TO DO:**
|
| Detail where in the FFT the fractional sample for the rotator is evaluated.
|
| Check correction to phase in p363.
"""
F_adj_shift_partial_out=[]
F_adj_shift_partial_mon=[]
F_adj_shift_pcal_out=[]
F_pcal_fix_out=[]
F_first_sample_out=[]
# TO DO: assuming all data same type for now
[sideband,data_type]=F_side[0]
# Windowing and FFT
first_iteration=1
last_fractional_recalc=0
last_str_st=""
F1_fft = window_and_fft(F1,fft_size,windowing,flatten_chunks=0,dtype_complex=dtype_complex) # ,rfft_data_type=data_type)
# If real samples take only half FFT (LSB or USB as applicable)
if data_type=='r':
if sideband=='L':
F1_fft = np.delete(F1_fft,np.s_[:fft_size//2],2)
else:
F1_fft = np.delete(F1_fft,np.s_[fft_size//2:],2)
shift_int=0
# Fractional sample correction
F1_fft_rot=np.zeros(F1_fft.shape,dtype=dtype_complex)
error_f_frac=1
if F_rates!=[]:
if data_type=='c':
freqscale2 = np.arange(0,1,1/float(fft_size))
fft_size_comp=fft_size
else:
if sideband=='L':
freqscale2 = float(-1)*np.arange(0.5,0,float(-1)/float(fft_size)) # First half of the full vector (e.g. [-0.5 -0.375 -0.25 -0.125] with fft_size=8)
else:
freqscale2 = np.arange(0,1,1/float(fft_size))[:fft_size//2] # Second half the full vector (e.g. [ 0. 0.125 0.25 0.375] with fft_size=8)
fft_size_comp=fft_size//2
# p363
for stpol in range(F1_fft.shape[0]):
fs=F_fs[F_refs[stpol]]
Ts=1/fs
[sideband,data_type]=F_side[F_refs[stpol]]
#str_st=F_ind[F_refs[stpol]].split('.')[0]
str_st=F_ind[stpol].split('.')[0]
#sample0=F_first_sample[F_refs[stpol]]-len(F1[F_refs[stpol]])
sample0=F_first_sample[F_refs[stpol]]
##adjustments (padding) from hstack_samples...
#sample0+=F_lti[stpol][3]
num_samples=len(F1[F_refs[stpol]])
F_first_sample_out.append(sample0+num_samples)
computed=0
error_f_frac=0
i_row=-1
if last_str_st!=str_st or first_iteration:
if SAVE_TIME_ROTATIONS:
first_iteration=0
first_sample=sample0
first_sample_s=first_sample*Ts
[delay_rate_0,delay_rate_1,delay_rate_2,delay_rate_ref,clock_rate_0,\
clock_rate_1,clock_abs_rate_0,clock_abs_rate_1,clock_rate_ref,\
model_only_delay,clock_only_delay,diff_frac]=F_rates[F_refs[stpol]]
diff_frac=0
[fractional_sample_correction,shift_delay]=F_frac[F_refs[stpol]]
shift_s=shift_delay*fs
frtot_v=[]
first_iteration_recalc=1
# TO DO: consider centering in the interval (N//2)
timescale=[0]
clock_diff = [clock_rate_0,clock_rate_1]
poly_diff = [delay_rate_0,delay_rate_1,delay_rate_2]
clock_abs = [clock_abs_rate_0,clock_abs_rate_1]
seconds_ref_clock = clock_rate_ref
#if USE_NE_F:
# npr1 = np.arange(F1_fft.shape[1])
# total_timescale = ne.evaluate("Ts*(sample0+fft_size*npr1)") # slower
#else:
total_timescale =Ts*(sample0+fft_size*np.arange(F1_fft.shape[1]))
total_seconds_offset=0
[r_recalc,m_unused,c_recalc,r_unused,a_unused] = get_delay_val(\
clock_diff=clock_diff,\
poly_diff=poly_diff,\
seconds_ref_clock=seconds_ref_clock,\
seconds_ref_poly=delay_rate_ref,\
seconds=total_timescale,\
seconds_offset=total_seconds_offset,\
v=DEBUG_LIB_DELAY,diff_pol=DIFF_POLY)
[full_fractional_recalc,fractional_recalc] = get_full_frac_val(r_recalc,fs)
for row in range(F1_fft.shape[1]):
i_row+=1
fsr=sample0+i_row*fft_size
lsr=fsr+fft_size
if DEBUG_DELAYS:
print_debug_r_delays_f(stpol,F_refs[stpol],F_ind[stpol],fsr,num_samples,len(timescale),\
total_timescale[row],0.0,\
total_seconds_offset,r_recalc[row],r_unused,a_unused,fractional_sample_correction,\
full_fractional_recalc[row],fractional_recalc[row],diff_frac)
computed=0
if last_fractional_recalc!=fractional_recalc[row] or first_iteration_recalc:
if SAVE_TIME_ROTATIONS:
first_iteration_recalc=0
computed=1
#print(str_st)
#print(freqscale2*(fractional_recalc[row]))
[fr6,nr]=get_exp(freqscale2*(fractional_recalc[row]))
if not(nr):
#frtot=get_rotator([fr6])
frtot=fr6
frtot_v.append([frtot])
else:
frtot_v.append([1.0])
else:
# Skipping, copy last value
if not(nr):
frtot_v.append([frtot])
else:
frtot_v.append([1.0])
last_fractional_recalc=fractional_recalc[row]
last_str_st=str_st
for row in range(F1_fft.shape[1]):
if not nr:
try:
np.multiply(F1_fft[stpol,row,:],frtot_v[row][0],F1_fft[stpol,row,:])
except IndexError:
print("Error in rotation: "+str(len(frtot_v))+", "+str(F1_fft.shape))
if DEBUG_DELAYS:
print("zR"+KEY_SEP+"f "+str(stpol).rjust(5)+str(F_refs[stpol]).rjust(8)+F_ind[stpol].rjust(8)+\
str(fsr).rjust(10)+str(num_samples).rjust(10) +\
" C,R >>>>".ljust(191)+str(computed).rjust(3)+str(int(not(nr))).rjust(3))
if error_f_frac==0:
if DEBUG_FRAC_OVER:
# TO DO: create functions in lib_debug(?)
print("zR"+KEY_SEP+"o".ljust(35)+str(stpol).rjust(5)+str(F_refs[stpol]).rjust(8)+F_ind[stpol].rjust(8)+\
str(fsr).rjust(10)+str(num_samples).rjust(10)+str(len(timescale)).rjust(10)+\
str(timescale[0]).rjust(16)+str(r_recalc[0]).rjust(20)+\
str(full_fractional_recalc[row]).rjust(20)+\
str(fractional_recalc[row]).rjust(20)+\
#str(frac_re).rjust(10)+\
#str(total_frac_delay_int).rjust(10)+\
"".rjust(10)+\
#str(clock_frac_delay_diff).rjust(20)+\
"".rjust(20)+\
#str(clock_frac_delay_int).rjust(10))
"".rjust(10))
else:
if DEBUG_FRAC_OVER:
print("zR"+KEY_SEP+"o "+"error")
# Correction for pcal
F_pcal_fix_out.append(shift_delay)
else:
F_first_sample_out=F_first_sample
#for stpol in range(F1_fft.shape[0]):
# for row in range(F1_fft.shape[1]):
# F1_fft_rot[stpol,row,:]=F1_fft[F_refs[stpol],row,:]
print("zR\tWarning: no rotation: first sample "+str(F_first_sample))
if (len(F_pcal_fix)>=len(F_pcal_fix_out))or(error_f_frac==1):
F_pcal_fix_out=F_pcal_fix #[:]
#F2_fft_rot = np.conj(F1_fft_rot)
if DEBUG_DELAYS or DEBUG_LIB_DELAY: #or DEBUG_FRAC_OVER :
print("zR"+KEY_SEP+"oj".ljust(20)+str(len(F_adj_shift_partial_out))+" "+\
','.join(map(str,F_adj_shift_partial_out))+" "+\
" mon "+','.join(map(str,F_adj_shift_partial_mon)))
print("zR"+KEY_SEP+"---------------")
return([F1_fft,None,F_adj_shift_partial_out,F_adj_shift_pcal_out,F_pcal_fix_out,F_first_sample_out]) | d9a4838347e498472228992dc77d08d44ed01c6e | 16,278 |
from django import http
def bookmark(request):
"""
Add or remove a bookmark based on POST data.
"""
if request.method == 'POST':
# getting handler
model_name = request.POST.get('model', u'')
model = django_apps.get_model(*model_name.split('.'))
if model is None:
# invalid model -> bad request
return http.HttpResponseBadRequest(ERRORS['model'])
handler = handlers.library.get_handler(model)
if handler is None:
# bad or unregistered model -> bad request
return http.HttpResponseBadRequest(ERRORS['handler'])
# getting form
form = handler.get_form(request, data=request.POST)
if form.is_valid():
instance = form.instance()
bookmark_model = handler.backend.get_model()
# validating the bookmark key
key = handler.get_key(request, instance, form.cleaned_data['key'])
if not handler.allow_key(request, instance, key):
return http.HttpResponseBadRequest(ERRORS['key'])
# pre-save signal: receivers can stop the bookmark process
# note: one receiver is always called: *handler.pre_save*
# handler can disallow the vote
responses = signals.bookmark_pre_save.send(sender=bookmark_model,
form=form, request=request)
# if one of the receivers returns False then bookmark process
# must be killed
for receiver, response in responses:
if response is False:
return http.HttpResponseBadRequest(
u'Receiver %r killed the bookmark process' %
receiver.__name__)
# adding or removing the bookmark
bookmark = handler.save(request, form)
created = bool(bookmark.pk)
# post-save signal
# note: one receiver is always called: *handler.post_save*
signals.bookmark_post_save.send(sender=bookmark_model,
bookmark=bookmark, request=request, created=created)
# process completed successfully: redirect
return handler.response(request, bookmark, created)
# form is not valid: must handle errors
return handler.fail(request, form.errors)
# only answer POST requests
return http.HttpResponseForbidden('Forbidden.') | 32743894345e170d6d0efc427f3be0fb8d24b044 | 16,279 |
def start_nodenetrunner(nodenet_uid):
"""Starts a thread that regularly advances the given nodenet by one step."""
nodenets[nodenet_uid].is_active = True
if runner['runner'].paused:
runner['runner'].resume()
return True | 7511f217beb64936d403a5f5472036206f446c90 | 16,280 |
def transform_coordinates_3d(coordinates, RT):
"""
Input:
coordinates: [3, N]
RT: [4, 4]
Return
new_coordinates: [3, N]
"""
if coordinates.shape[0] != 3 and coordinates.shape[1]==3:
coordinates = coordinates.transpose()
coordinates = np.vstack([coordinates, np.ones((1, coordinates.shape[1]), dtype=np.float32)])
new_coordinates = RT @ coordinates
new_coordinates = new_coordinates[:3, :]/new_coordinates[3, :]
return new_coordinates | 8a31f97bddd1c84a21d4b396e877c2b327e6890b | 16,281 |
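A quick check of transform_coordinates_3d with a pure translation (made-up transform and points):
import numpy as np
RT = np.eye(4)
RT[:3, 3] = [1.0, 2.0, 3.0]   # translate by (1, 2, 3)
pts = np.zeros((3, 3))        # three points at the origin, shape [3, N]
print(transform_coordinates_3d(pts, RT))  # every column becomes (1, 2, 3)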
def _get_misclass_auroc(preds, targets, criterion, topk=1, expected_data_uncertainty_array=None):
"""
Get AUROC for Misclassification detection
:param preds: Prediction probabilities as numpy array
:param targets: Targets as numpy array
:param criterion: Criterion to use for scoring on misclassification detection.
:param topk: Top-kl class probabilities to consider while making predictions.
:param expected_data_uncertainty_array: Expected data uncertainty as numpy array
:return: AUROC on misclassification detection
"""
misclassification_targets = (1 - _misclass_tgt(preds, targets, (topk,))).astype(bool)
if criterion == 'entropy':
criterion_values = np.sum(-preds * np.log(preds), axis=1)
elif criterion == 'confidence':
criterion_values = -preds.max(axis=1)
elif criterion == 'model_uncertainty':
criterion_values = np.sum(-preds * np.log(preds), axis=1) - expected_data_uncertainty_array
else:
raise NotImplementedError
return auroc(misclassification_targets, criterion_values) | 282ef66926092e99a62003152daccf733913b6c2 | 16,282 |
from typing import Iterable
from typing import List
def flatten(l: Iterable) -> List:
"""Return a list of all non-list items in l
:param l: list to be flattened
:return:
"""
rval = []
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
if len(list(e)):
rval += flatten(e)
else:
rval.append(e)
return rval | 2d2202c21e6da7064491d55d5519c259d10f42c0 | 16,283 |
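flatten keeps strings whole, drops empty lists and recurses into nested ones:
print(flatten([1, [2, [3, 4]], 'ab', []]))  # [1, 2, 3, 4, 'ab']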
def create_note(dataset_id, fhir_store_id, note_id): # noqa: E501
"""Create a note
Create a note # noqa: E501
:param dataset_id: The ID of the dataset
:type dataset_id: str
:param fhir_store_id: The ID of the FHIR store
:type fhir_store_id: str
:param note_id: The ID of the note that is being created
:type note_id: str
:rtype: NoteCreateResponse
"""
res = None
status = None
try:
store_name = None
try:
store_name = "datasets/%s/fhirStores/%s" % \
(dataset_id, fhir_store_id)
DbFhirStore.objects.get(name=store_name)
except DoesNotExist:
status = 400
res = Error("The specified FHIR store was not found", status)
return res, status
try:
note_create_request = NoteCreateRequest.from_dict(
connexion.request.get_json())
try:
DbPatient.objects.get(
fhirStoreName=store_name,
identifier=note_create_request.patient_id)
except DoesNotExist:
status = 400
res = Error("The specified patient was not found", status)
return res, status
resource_name = "%s/fhir/Note/%s" % (store_name, note_id)
DbNote(
identifier=note_id,
resourceName=resource_name,
fhirStoreName=store_name,
text=note_create_request.text,
type=note_create_request.type,
patientId=note_create_request.patient_id
).save()
note_resource_name = "%s/fhir/Note/%s" % (store_name, note_id)
res = NoteCreateResponse(name=note_resource_name)
status = 201
except NotUniqueError as error:
status = 409
res = Error("Conflict", status, str(error))
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status | 396f81b4a6035a9f295faebdd1aa313131d0da2b | 16,284 |
def load_credential_from_args(args):
"""load credential from command
Args:
args(str): str join `,`
Returns:
list of credential content
"""
if ',' not in args:
        raise ValueError("credential argument must contain two paths joined by ','")
file_path_list = args.split(',')
if len(file_path_list) != 2:
        raise ValueError("expected exactly two credential file paths")
if not file_path_list[0].endswith('.key'):
file_path_list[0], file_path_list[1] = file_path_list[1], file_path_list[0]
return [load_credential_from_file(file_path) for file_path in file_path_list] | 4f2e0b1e57ee3baaeb1bab3dc0e7e3874aaeec7c | 16,285 |
def encode(string: str, key: str) -> str:
"""
Encode string using the Caesar cipher with the given key
:param string: string to be encoded
:param key: letter to be used as given shift
:return: encoded string
:raises: ValueError if key len is invalid
"""
if len(key) > 1:
raise ValueError("[ERROR] Length of a key may not exceed 1 for Caesar cipher")
return vigener.encode(string, key) | ddba41c5efc01df06290cd6496ef8eb54dbb28be | 16,286 |
def compile_binary(binary, compiler, override_operator=None, **kw):
"""
If there are more than 10 elements in the `IN` set, inline them to avoid hitting the limit of \
the number of query arguments in Postgres (1<<15).
""" # noqa: D200
operator = override_operator or binary.operator
if operator is not in_op and operator is not notin_op:
return compiler.visit_binary(binary, override_operator=override_operator, **kw)
if isinstance(binary.right, BindParameter):
right_len = len(binary.right.value)
else:
right_len = 0
if right_len >= 10:
left = compiler.process(binary.left, **kw)
kw["literal_binds"] = True
use_any = getattr(binary, "any_values", False) and compiler.dialect.name == "postgresql"
negate = use_any and operator is notin_op
if use_any:
# ANY(VALUES ...) seems to be performing the best among these three:
# 1. IN (...)
# 2. IN(ARRAY[...])
# 3. IN(VALUES ...)
right = any_(Grouping(Values(
binary.left, literal_binds=True,
).data(TupleWrapper(binary.right.value))))
operator = operators.eq
else:
right = binary.right
right = compiler.process(right, **kw)
sql = left + OPERATORS[operator] + right
if negate:
sql = "NOT (%s)" % sql
return sql
elif operator is in_op and right_len == 1:
# IN (<value>) -> = <value>
return compiler.process(binary.left == binary.right.value[0], **kw)
return compiler.visit_binary(binary, override_operator=override_operator, **kw) | 1798ded35c12d6a3bf2e5edc34dcf11ff70ce697 | 16,287 |
from typing import Callable
from typing import Iterable
from typing import Iterator
import itertools
def flat_map(
fn: Callable[[_T], Iterable[_S]], collection: Iterable[_T]
) -> Iterator[_S]:
"""Map a function over a collection and flatten the result by one-level"""
return itertools.chain.from_iterable(map(fn, collection)) | a1a09611f920078cb25a23279004acd00ac23142 | 16,288 |
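A small illustration of flat_map: each element expands to a short list and the results are chained lazily:
print(list(flat_map(lambda x: [x, x * 10], [1, 2, 3])))  # [1, 10, 2, 20, 3, 30]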
def create_vector_clock(node_id, timeout):
"""This method builds the initial vector clock for a new key.
Parameters
----------
node_id : int
the id of one node in the cluster
timeout : int
the expire timeout of the key
Returns
-------
dict
the vector clock as dictonary
"""
if node_id is not None and timeout is not None:
return {
"versions": [{"nodeId": node_id, "version": 1}],
"timestamp": timeout
}
else:
raise ValueError("You must gave the node id and the timeout.") | ed6df0e7e493d448f52e5fe47b55df8a1de94543 | 16,289 |
def ParseStateFoldersFromFiles(state_files):
"""Returns list of StateFolder objects parsed from state_files.
Args:
state_files: list of absolute paths to state files.
"""
def CreateStateFolder(folderpath, parent_namespace):
del parent_namespace # Unused by StateFolder.
return state_lib.StateFolder(folderpath)
return _ParseFoldersFromFiles(state_files, base_lib.ComponentType.MULTI_STATE,
CreateStateFolder) | c70421da1f193ca2dc86f12e7cffd84a1011af22 | 16,290 |
def spectral_norm(inputs, epsilon=1e-12, singular_value="left"):
"""Performs Spectral Normalization on a weight tensor.
Details of why this is helpful for GAN's can be found in "Spectral
Normalization for Generative Adversarial Networks", Miyato T. et al., 2018.
[https://arxiv.org/abs/1802.05957].
Args:
inputs: The weight tensor to normalize.
epsilon: Epsilon for L2 normalization.
singular_value: Which first singular value to store (left or right). Use
"auto" to automatically choose the one that has fewer dimensions.
Returns:
The normalized weight tensor.
"""
if len(inputs.shape) < 2:
raise ValueError(
"Spectral norm can only be applied to multi-dimensional tensors")
# The paper says to flatten convnet kernel weights from (C_out, C_in, KH, KW)
# to (C_out, C_in * KH * KW). Our Conv2D kernel shape is (KH, KW, C_in, C_out)
# so it should be reshaped to (KH * KW * C_in, C_out), and similarly for other
# layers that put output channels as last dimension. This implies that w
# here is equivalent to w.T in the paper.
w = tf.reshape(inputs, (-1, inputs.shape[-1]))
# Choose whether to persist the first left or first right singular vector.
# As the underlying matrix is PSD, this should be equivalent, but in practice
# the shape of the persisted vector is different. Here one can choose whether
# to maintain the left or right one, or pick the one which has the smaller
# dimension. We use the same variable for the singular vector if we switch
# from normal weights to EMA weights.
var_name = inputs.name.replace("/ExponentialMovingAverage", "").split("/")[-1]
var_name = var_name.split(":")[0] + "/u_var"
if singular_value == "auto":
singular_value = "left" if w.shape[0] <= w.shape[1] else "right"
u_shape = (w.shape[0], 1) if singular_value == "left" else (1, w.shape[-1])
u_var = tf.get_variable(
var_name,
shape=u_shape,
dtype=w.dtype,
initializer=tf.random_normal_initializer(),
trainable=False)
u = u_var
# Use power iteration method to approximate the spectral norm.
# The authors suggest that one round of power iteration was sufficient in the
# actual experiment to achieve satisfactory performance.
power_iteration_rounds = 1
for _ in range(power_iteration_rounds):
if singular_value == "left":
# `v` approximates the first right singular vector of matrix `w`.
v = tf.math.l2_normalize(
tf.matmul(tf.transpose(w), u), axis=None, epsilon=epsilon)
u = tf.math.l2_normalize(tf.matmul(w, v), axis=None, epsilon=epsilon)
else:
v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True),
epsilon=epsilon)
u = tf.math.l2_normalize(tf.matmul(v, w), epsilon=epsilon)
# Update the approximation.
with tf.control_dependencies([tf.assign(u_var, u, name="update_u")]):
u = tf.identity(u)
# The authors of SN-GAN chose to stop gradient propagating through u and v
# and we maintain that option.
u = tf.stop_gradient(u)
v = tf.stop_gradient(v)
if singular_value == "left":
norm_value = tf.matmul(tf.matmul(tf.transpose(u), w), v)
else:
norm_value = tf.matmul(tf.matmul(v, w), u, transpose_b=True)
norm_value.shape.assert_is_fully_defined()
norm_value.shape.assert_is_compatible_with([1, 1])
w_normalized = w / norm_value
# Deflate normalized weights to match the unnormalized tensor.
w_tensor_normalized = tf.reshape(w_normalized, inputs.shape)
return w_tensor_normalized | eb6961e984fbb8eb5c3d807faa7fa6d016c011b5 | 16,291 |
def rs_for_staff(user_id):
"""Returns simple JSON for research studies in staff user's domain
---
tags:
- User
- ResearchStudy
operationId: research_studies_for_staff
parameters:
- name: user_id
in: path
description: TrueNTH user ID, typically subject or staff
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the list of research_studies the requested staff user is
associated with.
schema:
id: nested_research_studies
properties:
research_study:
type: array
items:
type: object
required:
- title
properties:
title:
type: string
description: Research study title
resourceType:
type: string
description: FHIR like resourceType, "ResearchStudy"
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(
user_id, 'view', allow_on_url_authenticated_encounters=True)
if user.has_role(ROLE.PATIENT.value):
abort(
400,
"wrong request path for patient,"
" see /api/patient/<int:user_id>/research_study")
# Assume some staff like role - find all research studies
# in the org tree at, above or below all of the user's orgs
orgs = set()
ot = OrgTree()
for org in user.organizations:
try:
orgs.update(ot.at_and_above_ids(org.id))
except ValueError as ve:
raise ValueError(f"Failed at_and_above lookup on {org.id}")
orgs.update(ot.here_and_below_id(org.id))
studies = OrganizationResearchProtocol.query.filter(
OrganizationResearchProtocol.organization_id.in_(
tuple(orgs))).join(
ResearchProtocol,
OrganizationResearchProtocol.research_protocol_id ==
ResearchProtocol.id).with_entities(
ResearchProtocol.research_study_id).order_by(
ResearchProtocol.research_study_id).distinct()
results = [
ResearchStudy.query.get(s.research_study_id).as_fhir()
for s in studies]
return jsonify(research_study=results) | 9d36a02cc4909e336730fb27b3bcfe284bcd5d82 | 16,292 |
from typing import Mapping
from typing import Any
from tqdm import tqdm
def from_hetionet_json(
hetionet_dict: Mapping[str, Any],
use_tqdm: bool = True,
) -> BELGraph:
"""Convert a Hetionet dictionary to a BEL graph."""
graph = BELGraph( # FIXME what metadata is appropriate?
name='Hetionet',
version='1.0',
authors='Daniel Himmelstein',
)
# FIXME add namespaces
# graph.namespace_pattern.update({})
kind_identifier_to_name = {
(x['kind'], x['identifier']): x['name']
for x in hetionet_dict['nodes']
}
edges = hetionet_dict['edges']
if use_tqdm:
edges = tqdm(edges, desc='Converting Hetionet', unit_scale=True)
it_logger = edges.write
else:
it_logger = logger.info
for edge in edges:
_add_edge(graph, edge, kind_identifier_to_name, it_logger)
return graph | 34db6048369945964c61aa4297164458f576a786 | 16,293 |
import re
import click
def validate_memory(_ctx, _param, value):
"""Validate memory string."""
if value is None:
return None
if not re.search(r'\d+[KkMmGg]$', value):
raise click.BadParameter('Memory format: nnn[K|M|G].')
return value | c050863a974c08ccc18fdaa2f03388c8f6674835 | 16,294 |
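Outside of a click command the validate_memory callback can be exercised directly (the ctx and param arguments are unused):
print(validate_memory(None, None, '512M'))  # prints 512M (passes the nnn[K|M|G] check)
print(validate_memory(None, None, None))    # prints None (a missing value is allowed)
# validate_memory(None, None, '512')        # would raise click.BadParameter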
def BlockAvg3D( data , blocksize , mask ):
"""
3-D version of block averaging. Mainly applicable to making superpixel averages of datfile traces.
Not sure non-averaging calcs makes sense?
mask is a currently built for a 2d boolean array of same size as (data[0], data[1]) where pixels to be averaged are True.
"""
rows = data.shape[0]
cols = data.shape[1]
frames = data.shape[2]
if np.mod(rows,blocksize[0]) == 0 and np.mod(cols,blocksize[1]) == 0:
        blockR = rows // blocksize[0]   # integer division so the block counts can be used as array shapes
        blockC = cols // blocksize[1]
else:
print( 'Error, blocksize not evenly divisible into data size.')
return None
output = np.zeros((blockR,blockC,frames))
# Previous algorithm was slow and used annoying looping
# Improved algorithm that doeesn't need any looping. takes about 1.4 seconds instead of 60.
msk = np.array( mask , float )
msk.resize(rows, cols , 1 )
masked = np.array( data , float ) * np.tile( msk , ( 1 , 1 , frames ) )
step1 = masked.reshape(rows , blockC , -1 , frames).sum(2)
step2 = np.transpose(step1 , (1,0,2)).reshape(blockC , blockR , -1 , frames).sum(2)
step3 = np.transpose(step2 , (1,0,2))
mask1 = mask.reshape(rows , blockC , -1 ).sum(2)
count = mask1.transpose().reshape(blockC , blockR , -1).sum(2).transpose()
#mask1 = mask.reshape(rows , blockC , -1 , frames).sum(2)
#count = mask1.transpose().reshape(blockC , blockR , -1 , frames).sum(2).transpose()
output = step3 / count[:,:,np.newaxis]
output[ np.isnan(output) ] = 0
output[ np.isinf(output) ] = 0
return output | 4c0c9cb60c80f47289e7bff3e50ae3e39dd31c63 | 16,295 |
def build(buildconfig: BuildConfig, merge_train_and_test_data: bool = False):
"""Build regressor or classifier model and return it."""
estimator = buildconfig.algorithm.estimator()
if merge_train_and_test_data:
train_smiles, train_y = buildconfig.data.get_merged_sets()
else:
train_smiles, train_y, _, _ = buildconfig.data.get_sets()
train_X = descriptor_from_config(train_smiles, buildconfig.descriptor)
estimator.fit(train_X, train_y)
if merge_train_and_test_data:
train_scores = get_merged_train_score(estimator, buildconfig)
test_scores = None
else:
train_scores, test_scores = get_train_test_scores(estimator, buildconfig)
return estimator, train_scores, test_scores | 9a98f15ae9b966e42cda848169b38a651e727205 | 16,296 |
import databrowse.support.dummy_web_support as db_web_support_module
import databrowse.support.handler_support as handler_support_module
import os
def GetXML(filename, output=OUTPUT_ELEMENT, **params):
"""
Get the XML representation of a file, as produced by the Databrowse library
Arguments:
filename - Relative or absolute path to file of interest
output - Determines the type of output to be returned from the function
dbl.OUTPUT_ELEMENT returns an LXML etree.Element
dbl.OUTPUT_ETREE returns an LXML etree.ElementTree
dbl.OUTPUT_STRING returns a string containing the XML
dbl.OUTPUT_STDOUT prints the XML string and returns nothing
**params - A variable number of optional parameters that are treated the
same way as query string values that would be POST or GET to
the web server when Databrowse is being used from the web.
Usage:
>>> from databrowse.lib import db_lib as dbl
>>> dbl.GetXML('/tmp/emptyfile', output=dbl.OUTPUT_STDOUT)
<default:default>
<filename>emptyfile</filename>
<path>/tmp</path>
<size>0.0 byte</size>
<mtime>Tue Sep 3 10:12:40 2013</mtime>
<ctime>Tue Sep 3 10:12:40 2013</ctime>
<atime>Tue Sep 3 10:12:42 2013</atime>
<contenttype>text/plain</contenttype>
<permissions>-rw-rw-r--</permissions>
<owner>user:user</owner>
</default:default>
See also: DebugGetXML()
"""
# Set up web_support class with environment information
db_web_support = db_web_support_module.web_support(filename, params)
# Determine Requested File/Folder Absolute Path and Path Relative to Dataroot
if "path" not in db_web_support.req.form:
fullpath = db_web_support.dataroot
relpath = '/'
pass
else:
fullpath = os.path.abspath(db_web_support.req.form["path"].value)
if not fullpath.startswith(db_web_support.dataroot):
return db_web_support.req.return_error(403)
if os.access(fullpath, os.R_OK) and os.path.exists(fullpath):
if fullpath == db_web_support.dataroot:
relpath = '/'
pass
else:
relpath = fullpath.replace(db_web_support.dataroot, '')
pass
pass
elif not os.path.exists(fullpath):
return db_web_support.req.return_error(404)
else:
return db_web_support.req.return_error(401)
pass
# Import Plugin Directory
#if db_web_support.pluginpath not in sys.path: # Removed 8/5/13 - Transition to Installed Modules
# sys.path.append(db_web_support.pluginpath)
# Determine handler for requested path
#import handler_support as handler_support_module
handler_support = handler_support_module.handler_support(db_web_support.icondbpath, db_web_support.hiddenfiledbpath, db_web_support.directorypluginpath)
handlers = handler_support.GetHandler(fullpath)
handler = handlers[-1]
# Let's see if we want to override the default handler
if "handler" in db_web_support.req.form:
handler = db_web_support.req.form['handler'].value
pass
# Get A Handle to The Rendering Plugin
caller = "databrowse"
exec("import databrowse.plugins.%s.%s as %s_module" % (handler, handler, handler))
exec("renderer = %s_module.%s(relpath, fullpath, db_web_support, handler_support, caller, handlers%s%s%s)" % (handler, handler,\
', content_mode="' + db_web_support.req.form["content_mode"].value + '"' if "content_mode" in db_web_support.req.form else '',\
', style_mode="' + db_web_support.req.form['style_mode'].value + '"' if "style_mode" in db_web_support.req.form else '',\
', recursion_depth=' + db_web_support.req.form['recursion_depth'].value + '' if "recursion_depth" in db_web_support.req.form else ''))
# Register Primary Namespace
#etree.register_namespace('db', 'http://thermal.cnde.iastate.edu/databrowse')
if output == OUTPUT_ETREE:
return etree.ElementTree(renderer.getContent())
elif output == OUTPUT_STRING:
xmltree = etree.ElementTree(renderer.getContent())
return etree.tostring(xmltree)
elif output == OUTPUT_ELEMENT:
return renderer.getContent()
elif output == OUTPUT_STDOUT:
xmltree = etree.ElementTree(renderer.getContent())
print(etree.tostring(xmltree, pretty_print=True))
else:
return etree.ElementTree(renderer.getContent()) | 369a14f950371711c032805b74b8b640f1b4af66 | 16,297 |
def stellar_radius(M, logg):
"""Calculate stellar radius given mass and logg"""
if not isinstance(M, (int, float)):
raise TypeError('Mass must be int or float. {} type given'.format(type(M)))
if not isinstance(logg, (int, float)):
raise TypeError('logg must be int or float. {} type given'.format(type(logg)))
if M < 0:
raise ValueError('Only positive stellar masses allowed.')
M = float(M)
return M/(10**(logg-4.44)) | 2afbd991c7461d7861370f18d90df840569da857 | 16,298 |
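Solar-like inputs to stellar_radius give back one solar radius (M in solar masses, logg in cgs dex):
print(stellar_radius(1, 4.44))  # 1.0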
def set_plus_row(sets, row):
"""Update each set in list with values in row."""
for i in range(len(sets)):
sets[i].add(row[i])
return sets | 87f448dc3199c8d3137d5811dd184b3d2bd7cbe3 | 16,299 |