content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
import ntplib
import time
def get_ntp_time(ntp_server_url):
"""
通过ntp server获取网络时间
:param ntp_server_url: 传入的服务器的地址
:return: time.strftime()格式化后的时间和日期
"""
ntp_client = ntplib.NTPClient()
ntp_stats = ntp_client.request(ntp_server_url)
fmt_time = time.strftime('%X', time.localtime(ntp_stats.tx_time))
fmt_date = time.strftime('%Y-%m-%d', time.localtime(ntp_stats.tx_time))
return fmt_time, fmt_date | 17881970361994e329e1154478c8abb8171461f9 | 16,100 |
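A minimal usage sketch (assuming the `ntplib` package is installed and the network is reachable; `pool.ntp.org` is just an example public NTP pool):

if __name__ == "__main__":
    # Example only: any reachable NTP server address works here.
    fmt_time, fmt_date = get_ntp_time("pool.ntp.org")
    print(fmt_date, fmt_time)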
from scipy.io import loadmat
def read_data(path, names, verbose=False):
"""
Read time-series from MATLAB .mat file.
Parameters
----------
path : str
Path (relative or absolute) to the time series file.
names : list
Names of the requested time series incl. the time array itself
verbose : bool, optional
Increase verbosity
Returns
-------
dict
Time and data
Examples
--------
>>> tname, names = read_names('data.mat')
    >>> data = read_data('data.mat', names)
>>> t = data[tname] # time
>>> x1 = data[names[0]] # first data series
"""
if verbose:
print('Reading %s ...' % path)
data = loadmat(path, squeeze_me=True, variable_names=names)
return data | 978870d4517b5e5ab66186747c326794d6d43814 | 16,101 |
async def search(q: str, person_type: str = 'student') -> list:
"""
Search by query.
:param q: `str` query to search for
:param person_type: 'student', 'lecturer', 'group', 'auditorium'
:return: list of results
"""
url = '/'.join((BASE_URL, SEARCH_INDPOINT))
params = {'term': q,
'type': person_type}
return await api_request(url, params) | 83913866f45a44202ccddbc9352fef9799caf751 | 16,102 |
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data) | 27239052d9ca0b12c19977e79d512e0cab04182e | 16,103 |
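A quick sketch of the expected output:

print(format_data_hex(b"\x01\xab\xff"))  # prints: 01 AB FF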
import glob
import os
import numpy as np
from PIL import Image
def eval_test(test_path, gt_path, test_prefix='', gt_prefix='',
test_format='png', gt_format='png', exigence=2, desync=0):
"""
Evaluates some test results against a given ground truth
:param test_path: (str) relative or absolute path to the test results images
:param gt_path: (str) relative or absolute path to the ground truth images
:param test_prefix: (str) prefix of the test files before their ID (e.g.
test_A_001235.png has test_A_ as prefix)
:param gt_prefix: (str) prefix of the ground truth files before their ID
(e.g. gt001235.png has gt as prefix)
:param test_format: (str) format of the test images
:param gt_format: (str) format of the ground truth images
    :param exigence: (int) controls how easily a pixel is counted as foreground
    in the ground truth:
- 0: all non-static pixels will be taken as foreground
- 1: all non-static pixels excepting hard shadows will be taken as
foreground
- 2: only pixels with motion inside the region of interest will be taken
as foreground
- 3: only pixels with known motion inside the region of interest will be
taken as foreground
    - Else exigence=2 will be assumed
    :param desync: (int) offset added to the test frame ID when looking up the
    matching ground truth frame
    :return: (dict) results of the test analysis.
- TP: (int) true positives
- FP: (int) false positives
- FN: (int) false negatives
- TN: (int) true negatives
"""
    if exigence == 0:
        fg_thresh = 25
    elif exigence == 1:
        fg_thresh = 75
    elif exigence == 3:
        fg_thresh = 200
    else:
        fg_thresh = 100
data = dict(TP=0, FP=0, FN=0, TN=0)
for filename in glob.glob(os.path.join(test_path,
test_prefix + '*.' + test_format)):
pil_img_test = Image.open(filename)
img_test = np.array(pil_img_test)
f_id = filename.replace(os.path.join(test_path, test_prefix), '')
f_id = f_id.replace('.' + test_format, '')
        try:
            f_id = str(int(f_id) + desync).zfill(6)
        except ValueError:
            print('Erroneous type of Id in data files will result in fake '
                  'results.')
filename_gt = os.path.join(gt_path, gt_prefix + f_id + '.' + gt_format)
pil_img_gt = Image.open(filename_gt)
real_img_gt = np.array(pil_img_gt)
img_gt = np.where(real_img_gt > fg_thresh, 1, 0)
trues_test = img_test.astype(bool)
trues_gt = img_gt.astype(bool)
img_tp = np.logical_and(trues_test, trues_gt)
img_fp = np.logical_and(trues_test, np.logical_not(trues_gt))
img_fn = np.logical_and(np.logical_not(trues_test), trues_gt)
        # True negatives: background in both the test result and the ground truth.
        img_tn = np.logical_and(np.logical_not(trues_test), np.logical_not(trues_gt))
data['TP'] += img_tp.sum()
data['FP'] += img_fp.sum()
data['FN'] += img_fn.sum()
data['TN'] += img_tn.sum()
return data | b71475bf3c51e5a69e8498e60b8131a00fcd3d6f | 16,104 |
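A hedged sketch of how the returned counts could be turned into standard metrics; `summarize_eval` is an illustrative helper, not part of the original module:

def summarize_eval(counts):
    """Compute precision, recall and F1 from the TP/FP/FN counts returned by eval_test."""
    tp, fp, fn = counts['TP'], counts['FP'], counts['FN']
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1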
from datetime import datetime
import requests
import dateutil.parser
def get_installation_token(installation):
"""
Get access token for installation
"""
    now = datetime.now().timestamp()
if installation_token_expiry[installation] is None or now + 60 > installation_token_expiry[installation]:
# FIXME: if .netrc file is present, Authorization header will get
# overwritten, so need to figure out how to ignore that file.
if netrc_exists():
raise Exception("Authentication does not work properly if a ~/.netrc "
"file exists. Rename that file temporarily and try again.")
headers = {}
headers['Authorization'] = 'Bearer {0}'.format(get_json_web_token())
headers['Accept'] = 'application/vnd.github.machine-man-preview+json'
url = 'https://api.github.com/installations/{0}/access_tokens'.format(installation)
req = requests.post(url, headers=headers)
resp = req.json()
if not req.ok:
if 'message' in resp:
raise Exception(resp['message'])
else:
raise Exception("An error occurred when requesting token")
installation_token[installation] = resp['token']
installation_token_expiry[installation] = dateutil.parser.parse(resp['expires_at']).timestamp()
return installation_token[installation] | e5bf43f601ca9e155dcf296179d778b78b2cc67a | 16,105 |
import numpy as np
def disp(cog_x, cog_y, src_x, src_y):
"""
Compute the disp parameters
Parameters
----------
cog_x: `numpy.ndarray` or float
cog_y: `numpy.ndarray` or float
src_x: `numpy.ndarray` or float
src_y: `numpy.ndarray` or float
Returns
-------
    (disp_dx, disp_dy, disp_norm, disp_angle, disp_sign):
        disp_dx: `astropy.units.m`
        disp_dy: `astropy.units.m`
        disp_norm: `astropy.units.m`
        disp_angle: `astropy.units.rad`
        disp_sign: `numpy.ndarray`
"""
disp_dx = src_x - cog_x
disp_dy = src_y - cog_y
disp_norm = np.sqrt(disp_dx**2 + disp_dy**2)
if hasattr(disp_dx, '__len__'):
disp_angle = np.arctan(disp_dy / disp_dx)
disp_angle[disp_dx == 0] = np.pi / 2. * np.sign(disp_dy[disp_dx == 0])
else:
if disp_dx == 0:
disp_angle = np.pi/2. * np.sign(disp_dy)
else:
disp_angle = np.arctan(disp_dy/disp_dx)
disp_sign = np.sign(disp_dx)
return disp_dx, disp_dy, disp_norm, disp_angle, disp_sign | e9d8166827e86a8e2180ba357a450aca817fdff4 | 16,106 |
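A minimal usage sketch with plain numpy arrays (astropy units omitted for brevity):

cog_x = np.array([0.0, 1.0])
cog_y = np.array([0.0, 1.0])
src_x = np.array([1.0, 1.0])
src_y = np.array([1.0, 3.0])
dx, dy, norm, angle, sign = disp(cog_x, cog_y, src_x, src_y)
# norm  -> [sqrt(2), 2.0]
# angle -> [pi/4, pi/2] (the second entry is handled by the dx == 0 branch)
# sign follows np.sign(dx), so it is 0 when dx == 0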
import numpy as np
def get_landmark_from_prob(prob, thres=0.5, mode="mean", binary_mask=False):
    """Compute the landmark location from the model probability maps
    Inputs:
        prob : [RO, E1], the model-produced probability map for a landmark
        thres : if np.max(prob) < thres, no landmark is detected
        mode : "mean" or "max", use the mean or max probability to locate the landmark
        binary_mask : if True, prob is a binary (0 or 1) map
    Outputs:
        pt : [x, y], detected landmark point, or None if nothing is detected
    """
pt = None
if(binary_mask):
ind = np.where(prob==thres)
else:
if(thres>0 and np.max(prob)<thres):
return pt
else:
adaptive_thres = 0.5
mask = adaptive_thresh_cpu(prob, p_thresh=adaptive_thres*np.max(prob))
ind = np.where(mask>0)
if (np.size(ind[0])==0):
return pt
pt = np.zeros(2)
if(mode == "mean"):
pt[0] = np.mean(ind[1].astype(np.float32))
pt[1] = np.mean(ind[0].astype(np.float32))
else:
v = np.unravel_index(np.argmax(prob), prob.shape)
pt[0] = v[1]
pt[1] = v[0]
return pt | fad614088e587e389f15b0700bf442a956d498b0 | 16,107 |
import socket
from urllib.error import URLError
from urllib.request import Request, urlopen
def request(
url,
timeout: float,
method="GET",
data=None,
response_encoding="utf-8",
headers=None,
):
"""
Helper function to perform HTTP requests
"""
req = Request(url, data=data, method=method, headers=headers or {})
try:
return urlopen(req, timeout=timeout).read().decode(response_encoding)
except (URLError, socket.timeout, UnicodeDecodeError) as error:
raise CEPProviderUnavailableError(error) | 80f130101290442d538fa3f416f5650800547c6b | 16,108 |
from typing import Any
from typing import Optional
from typing import get_args
from typing import get_origin
def get_annotation_affiliation(annotation: Any, default: Any) -> Optional[Any]:
"""Helper for classifying affiliation of parameter
:param annotation: annotation record
:returns: classified value or None
"""
args, alias = get_args(annotation), get_origin(annotation)
# if alias and alias == list:
annotation = args[0] if alias == list else annotation
if annotation == Request:
return "request"
elif isinstance(default, (Form, File)):
return "form"
return None | db6efd7dfb0ed0272e7491547669de8f235b2b35 | 16,109 |
import os
import glob
def find_config_files(
path=['~/.vcspull'], match=['*'], filetype=['json', 'yaml'], include_home=False
):
"""Return repos from a directory and match. Not recursive.
Parameters
----------
path : list
list of paths to search
match : list
list of globs to search against
    filetype : list
        list of filetypes to search against
include_home : bool
Include home configuration files
Raises
------
LoadConfigRepoConflict :
There are two configs that have same path and name with different repo urls.
Returns
-------
list :
list of absolute paths to config files.
"""
configs = []
if include_home is True:
configs.extend(find_home_config_files())
if isinstance(path, list):
for p in path:
configs.extend(find_config_files(p, match, filetype))
return configs
else:
path = os.path.expanduser(path)
if isinstance(match, list):
for m in match:
configs.extend(find_config_files(path, m, filetype))
else:
if isinstance(filetype, list):
for f in filetype:
configs.extend(find_config_files(path, match, f))
else:
match = os.path.join(path, match)
match += ".{filetype}".format(filetype=filetype)
configs = glob.glob(match)
return configs | 3138839e8914451a4138c3e24d375089c5c866b0 | 16,110 |
def sort_dict(original):
"""Recursively sorts dictionary keys and dictionary values in alphabetical order"""
if isinstance(original, dict):
        # Make a new "ordered" dictionary. No need for collections.OrderedDict in Python 3.7+.
        res = dict()
for k, v in sorted(original.items()):
res[k] = v
d = res
else:
d = original
for k in d:
if isinstance(d[k], str):
continue
if isinstance(d[k], list) and len(d[k]) > 1 and isinstance(d[k][0], str):
d[k] = sorted(d[k])
if isinstance(d[k], dict):
d[k] = sort_dict(d[k])
if isinstance(d[k], list) and len(d[k]) >= 1 and isinstance(d[k][0], dict):
for i in range(len(d[k])):
d[k][i] = sort_dict(d[k][i])
return d | 8c194af76160b0e4d3bad135720e051a4d4622b0 | 16,111 |
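A small illustration of the recursive sorting (a sketch, not taken from the original code base):

example = {"b": ["z", "a"], "a": {"d": 1, "c": 2}}
print(sort_dict(example))
# {'a': {'c': 2, 'd': 1}, 'b': ['a', 'z']}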
import requests
import webbrowser as web
def playonyt(topic):
"""Will play video on following topic, takes about 10 to 15 seconds to load"""
url = 'https://www.youtube.com/results?q=' + topic
count = 0
cont = requests.get(url)
data = str(cont.content)
lst = data.split('"')
for i in lst:
count+=1
if i == 'WEB_PAGE_TYPE_WATCH':
break
if lst[count-5] == "/results":
raise Exception("No video found.")
#print("Videos found, opening most recent video")
web.open("https://www.youtube.com"+lst[count-5])
return "https://www.youtube.com"+lst[count-5] | 49f4285dc0e0086d30776fc0668bac0e4c19dbc5 | 16,112 |
def train_classifier(classifier, features, labels):
"""This function must concern itself with training the classifier
on the specified data."""
return classifier.fit(features, labels) | ef74548aeb6e245d8728caf3205163c249046aae | 16,113 |
def work_on_disk(dev, root_mb, swap_mb, image_path):
"""Creates partitions and write an image to the root partition."""
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
LOG.warn(_("parent device '%s' not found"), dev)
return
make_partitions(dev, root_mb, swap_mb)
if not is_block_device(root_part):
LOG.warn(_("root device '%s' not found"), root_part)
return
if not is_block_device(swap_part):
LOG.warn(_("swap device '%s' not found"), swap_part)
return
dd(image_path, root_part)
mkswap(swap_part)
try:
root_uuid = block_uuid(root_part)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to detect root device UUID."))
return root_uuid | 195ded6deae958b7efa41bcfdda1d3d68cabb23d | 16,114 |
import napari
def get_annotation_names(viewer):
"""Detect the names of nodes and edges layers"""
layer_nodes_name = None
layer_edges_name = None
for layer in viewer.layers:
if isinstance(layer, napari.layers.points.points.Points):
layer_nodes_name = layer.name
elif isinstance(layer, napari.layers.shapes.shapes.Shapes):
layer_edges_name = layer.name
if layer_nodes_name is not None and layer_edges_name is not None:
break
return layer_nodes_name, layer_edges_name | 20e64a6719b945eceda341d5a42da178818cb1a1 | 16,115 |
import numpy as np
def remap(kx,ky,lx,ly,qomt,datai):
    """
    Remap the k-space variable back to the shearing-periodic
    frame to reflect the time-dependent Eulerian wave number.
    """
ndim = datai.ndim
dim = np.array(datai.shape)# datai[nz,ny,nx]
sh_data = np.empty([dim[0],dim[1],dim[2]])
tp_data = np.empty([dim[0],dim[2]])
sh_kx = -qomt*ky*lx/ly
#nquist= np.max(np.fabs(kx))
for j in np.arange(0,dim[1]):
quot = int(np.floor(sh_kx[j]))
res = sh_kx[j]-float(quot)
#kx_new = kx[:] + sh_kx[j]
tp_data[:,:]= datai[:,j,:]
sh_data[:,j,:] = (1.0-res)*np.roll(tp_data,quot, axis=1) \
+ res*np.roll(tp_data,quot+1,axis=1)
#sh_data[:,j,kx_new[:]>nquist] = 0.0
return sh_data | 6ea415df88c0db2ba26ef0fc8daa35b12a101ef8 | 16,116 |
def fips_disable():
"""
Disables FIPS on RH/CentOS system. Note that you must reboot the
system in order for FIPS to be disabled. This routine prepares
the system to disable FIPS.
CLI Example:
.. code-block:: bash
salt '*' ash.fips_disable
"""
installed_fips_pkgs = _get_installed_dracutfips_pkgs()
ret = { 'result': True }
old = {}
new = {}
try:
# Remove dracut-fips installations.
installed_fips_pkgs = _get_installed_dracutfips_pkgs()
if 'dracut-fips' in installed_fips_pkgs:
__salt__['pkg.remove']('dracut-fips')
old['Packages'] = installed_fips_pkgs
# If fips is in kernel, create a new boot-kernel.
if _is_fips_in_kernel():
_move_boot_kernel(False)
__salt__['cmd.run']("dracut -f", python_shell=False)
# Update grub.cfg file to remove the fips argument.
grub_args = _get_grub_args()
if 'fips=1' in grub_args:
cmd = 'grubby --update-kernel=ALL --remove-args=fips=1'
__salt__['cmd.run'](cmd, python_shell=False)
new['grubby'] = cmd
# Update GRUB command line entry to remove fips.
diff = _modify_grub_file(True)
if diff:
new['/etc/default/grub'] = diff
except Exception:
_rollback_fips_disable(installed_fips_pkgs)
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Unable to change state of system to FIPS-disabled.'
else:
if old:
ret['changes'] = {'old': old}
ret['comment'] = 'FIPS has been toggled to off.'
if new:
if 'changes' in ret:
ret['changes'].update({'new': new})
else:
ret['changes'] = {'new': new}
ret['comment'] = 'FIPS has been toggled to off.'
if fips_status() == 'enabled':
msg = ' Reboot system to place into FIPS-disabled state.'
if 'comment' in ret:
ret['comment'] = ret['comment'] + msg
else:
ret['comment'] = msg[1:]
if 'changes' not in ret and 'comment' not in ret:
ret['comment'] = 'FIPS mode is already disabled. No changes.'
finally:
return ret | d31cc5ad6dd71ec0f3d238051a7b2a64b311c0fd | 16,117 |
import sys
def get_os_platform():
    """return platform name, but for Jython it uses the os.name Java property"""
    ver = sys.platform.lower()
    if ver.startswith('java'):
        # java.lang is only importable when running under Jython.
        import java.lang
        ver = java.lang.System.getProperty("os.name").lower()
print('platform: %s' % (ver))
return ver | df717ae12fadf0ced75f4f1148ceed11701a7f25 | 16,118 |
import datetime
import pandas as pd
def buy_sell_fun_mp_org(datam, S1=1.0, S2=0.8):
"""
斜率指标交易策略标准分策略
"""
start_t = datetime.datetime.now()
print("begin-buy_sell_fun_mp:", start_t)
dataR = pd.DataFrame()
for code in datam.index.levels[1]:
# data = price.copy()
# price = datam.query("code=='%s'" % code)
# data = price.copy()
data = buy_sell_fun(datam, code)
# if code == '000732':
# print(data.tail(22))
if len(dataR) == 0:
dataR = data
else:
dataR = dataR.append(data)
end_t = datetime.datetime.now()
print(end_t, 'buy_sell_fun_mp spent:{}'.format((end_t - start_t)))
result01 = dataR['nav'].groupby(level=['date']).sum()
result02 = dataR['nav'].groupby(level=['date']).count()
num = dataR.flag.abs().sum()
dataR2 = pd.DataFrame({'nav':result01 - result02 + 1,'flag':0})
# dataR2['flag'] = 0
dataR2.iat[-1,1] = num
# result['nav'] = result['nav'] - len(datam.index.levels[1]) + 1
return dataR2 | 8d3b78b9d266c3c39b8491677caa0f4dfb9f839a | 16,119 |
import collections.abc
import abc
from functools import partial
def marshall_namedtuple(obj):
"""
This method takes any atomic value, list, dictionary or namedtuple,
and recursively it tries translating namedtuples into dictionaries
"""
recurse = lambda x: map(marshall_namedtuple, x)
obj_is = partial(isinstance, obj)
if hasattr(obj, '_marshall'):
return marshall_namedtuple(obj._marshall())
elif obj_is(tuple) and hasattr(obj, '_fields'): # namedtuple
fields = zip(obj._fields, recurse(obj))
class_name = obj.__class__.__name__
return dict(fields, **{'_type': class_name})
elif obj_is((collections.abc.Mapping,dict)):
return type(obj)(zip(obj.keys(), recurse(obj.values())))
elif obj_is(collections.abc.Iterable) and not obj_is(str):
return type(obj)(recurse(obj))
elif obj_is(abc.ABC):
return {
'_instance_of': obj.__class__.__name__
}
elif obj_is(abc.ABCMeta):
return {
'_class': obj.__name__
}
else:
return obj | 87d24fe1b273bfcf481679a96710be757baf08a5 | 16,120 |
import cv2
import torch
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
# print("prepping images")
img = cv2.resize(img, (inp_dim, inp_dim))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0)
# print("prepped images")
return img | 02ebc73a32a24d59c53da9bfb99485f3a4f6dee2 | 16,121 |
from typing import Dict
from typing import List
import math
def best_broaders(supers_for_all_entities: Dict,
per_candidate_links_and_supers: List[Dict],
num_best: int = 5,
super_counts_field: str = "broader_counts",
doprint=False,
representativeness_threshold=0.1):
"""
Returns the best matching super for a candidate class, according to a list of supers for entities in the class
and entities in the whole corpus. If comparing to a taxonomy, a super is a broader.
    @param supers_for_all_entities: a dictionary that has, for every possible entity, the supers it belongs to
    @param super_counts_field: name of the per-candidate field holding the super counts
    @param per_candidate_links_and_supers: a list of dictionaries, one per candidate. For each, at least
    two fields are expected: "entities", containing the list of entities, and the one given by super_counts_field,
    which is, in turn, a dictionary whose keys are supers and whose values are the number of entities in that
    candidate having this broader
    @param num_best: maximum number of best matching supers to be returned
    @return: for every candidate class, the num_best best matching supers and their log odds ratio
"""
result = []
global_counts = dict()
for ent, bros in supers_for_all_entities.items():
for bro in bros:
global_counts[bro] = global_counts.get(bro, 0) + 1
onlytopmost = []
for can in per_candidate_links_and_supers:
# For this entity, the following dictionaries have an element for every possible super
# Using notation from the paper
        # T_cc : The number of entities narrower to a candidate which are tagged with NER type T
T_cc = {x: y for x, y in can[super_counts_field].items()
if y > representativeness_threshold * len(can["entities"])}
if len(T_cc) == 0:
T_cc = {x: y for x, y in can[super_counts_field].items()}
        # T_w : the number of entities in the whole corpus tagged with T
T_w = {y: global_counts[y] for y in T_cc.keys()}
# w : the total number of entities in the whole corpus
w = float(len(supers_for_all_entities))
# cc : the total number of entities in this candidate
cc = float(len(can["entities"]))
# dict of the form super : log_odds
log_odds_per_super = {x: math.log((T_cc[x] / cc) / (T_w[x] / w))
for x in T_cc.keys()}
logslist = list(log_odds_per_super.items())
logslist.sort(key=lambda x: x[1])
logslist.reverse()
maxbroads = min(len(logslist), num_best)
logodds = []
for bi in range(maxbroads):
logodds.append({"candidatesbroader": logslist[bi][0],
"loggods": logslist[bi][1]})
can["log_odds"] = logodds
if doprint:
print("\t\t---", ", ".join([str(x[1]) for x in logslist[:maxbroads]]))
if len(logslist) > 0:
onlytopmost.append(logslist[0][1])
can["best_match_broader"] = logslist[0][0]
else:
onlytopmost.append(None)
can["best_match_broader"] = None
return onlytopmost | 9aa9826c43e67a28eeca463b107296e093709246 | 16,122 |
def clump_list_sort(clump_list):
"""Returns a copy of clump_list, sorted by ascending minimum density. This
eliminates overlap when passing to
yt.visualization.plot_modification.ClumpContourCallback"""
    min_density = [c['Density'].min() for c in clump_list]
    args = np.argsort(min_density)
    # nar is presumably the module's alias for numpy.array; avoid shadowing the builtin `list`.
    sorted_clumps = nar(clump_list)[args]
    return sorted_clumps[::-1]
from typing import Dict
from typing import Any
from typing import Type
import types
from typing import Optional
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: Text,
properties: Dict[Text, Any],
custom_properties: Dict[Text, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, Text, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
    unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(uri)
# Only consider previous artifacts as candidates to reuse, if the properties
# of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (Text, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
for key, value in properties.items():
setattr(result, key, value)
for key, value in custom_properties.items():
if isinstance(value, int):
result.set_int_custom_property(key, value)
elif isinstance(value, (Text, bytes)):
result.set_string_custom_property(key, value)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result | d30dcd579c73c71173f10207ab80d05c761c7185 | 16,124 |
import re
def ParseCLILines(lines, skipStartLines=0, lastSkipLineRe=None, skipEndLines=0):
"""Delete first few and last few lines in an array"""
if skipStartLines > 0:
        if lastSkipLineRe is not None:
            # sanity check: make sure the last line to skip matches the given regexp
            if re.match(lastSkipLineRe, lines[skipStartLines - 1]) is None:
                raise exceptions.MalformedIO("Expected '%s' at line %d of result, but found '%s'." % (lastSkipLineRe, skipStartLines, lines[skipStartLines - 1].strip()))
if len(lines) < skipStartLines:
raise exceptions.MalformedIO("Can't skip first %d lines of result %s. It only contains %d lines." % (skipStartLines, repr(lines), len(lines)))
del lines[0:skipStartLines]
if skipEndLines > 0:
if len(lines) < skipEndLines:
raise exceptions.MalformedIO("Can't skip last %d lines of result %s. It only contains %d lines." % (skipEndLines, repr(lines), len(lines)))
del lines[-skipEndLines:]
return lines | dc445765f42df25b8d046e3f2303d85109a3d419 | 16,125 |
import numpy as np
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters | 785985b79b9284ba8c6058c8e9c4018955407cf8 | 16,126 |
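A quick shape check (a sketch):

params = initialize_parameters(n_x=2, n_h=4, n_y=1)
print(params["W1"].shape, params["b1"].shape)  # (4, 2) (4, 1)
print(params["W2"].shape, params["b2"].shape)  # (1, 4) (1, 1)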
import os
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source | 5ecaeddf2c3941bbfb0d89ee902a961f7aeab838 | 16,127 |
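Two illustrative cases (a sketch, assuming a POSIX-style os.path):

# _NormalizedSource('foo/./bar.cc')     -> 'foo/bar.cc'  (no '$' is lost, so the normalized path is used)
# _NormalizedSource('$(Dir)/../bar.cc') -> '$(Dir)/../bar.cc'  (normpath would drop the '$' variable, so it is left alone)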
from collections import defaultdict
from datetime import datetime, timedelta
import pandas as pd
def get_df_from_sampled_trips(step_trip_list, show_service_data=False, earliest_datetime=None):
"""Get dataframe from sampled trip list.
Parameters
----------
step_trip_list : list of lists
List of trip lists occuring in the same step.
show_service_data: bool
Show trip pickup and dropoff results.
earliest_datetime: datetime
Trip start time - rebalance offset
Returns
-------
DataFrame
Dataframe with trip data info.
"""
d = defaultdict(list)
for step, trips in enumerate(step_trip_list):
for t in trips:
d["placement_datetime"].append(t.placement)
d["step"].append(step + 1)
d["pk_id"].append(t.o.id)
d["dp_id"].append(t.d.id)
d["sq_class"].append(t.sq_class)
d["max_delay"].append(t.max_delay)
d["elapsed_sec"].append(t.elapsed_sec)
d["max_delay_from_placement"].append(t.max_delay_from_placement)
d["delay_close_step"].append(t.delay_close_step)
d["tolerance"].append(t.tolerance)
lon_o, lat_o = nw.tenv.lonlat(t.o.id)
lon_d, lat_d = nw.tenv.lonlat(t.d.id)
d["passenger_count"].append(1)
d["pickup_latitude"].append(lat_o)
d["pickup_longitude"].append(lon_o)
d["dropoff_latitude"].append(lat_d)
d["dropoff_longitude"].append(lon_d)
if show_service_data:
if t.pk_delay is not None:
pickup_datetime = t.placement + timedelta(
minutes=t.pk_delay
)
pickup_datetime_str = datetime.strftime(
pickup_datetime, "%Y-%m-%d %H:%M:%S"
)
if t.dropoff_time is not None:
dropoff_datetime = earliest_datetime + timedelta(
minutes=t.dropoff_time
)
dropoff_datetime_str = datetime.strftime(
dropoff_datetime, "%Y-%m-%d %H:%M:%S"
)
d["times_backlogged"].append(t.times_backlogged)
d["pickup_step"].append(
t.pk_step if t.pk_step is not None else "-"
)
d["dropoff_step"].append(
t.dp_step if t.dp_step is not None else "-"
)
d["pickup_delay"].append(
t.pk_delay if t.pk_delay is not None else "-"
)
d["pickup_duration"].append(
t.pk_duration if t.pk_duration is not None else "-"
)
d["pickup_datetime"].append(
pickup_datetime_str if t.pk_delay is not None else "-"
)
d["dropoff_time"].append(
t.dropoff_time if t.dropoff_time is not None else "-"
)
d["dropoff_datetime"].append(
dropoff_datetime_str if t.dropoff_time is not None else "-"
)
d["picked_by"].append(t.picked_by)
df = pd.DataFrame.from_dict(dict(d))
df.sort_values(by=["placement_datetime", "sq_class"], inplace=True)
return df | 36bba80f0c46862df0390cf4e4279eeb33002e86 | 16,128 |
def compute_v_y(transporter, particles):
"""
Compute values of V y on grid specified in bunch configuration
:param transporter: transport function
:param particles: BunchConfiguration object, specification of grid
:return: matrix with columns: x, theta_x, y, theta_y, pt, V y
"""
return __compute_optical_function(transporter, particles, Parameters.V_Y) | 4a17e0c0e4612534483187b6779bcf5c179c0fcc | 16,129 |
import numpy as np
def gabor_kernel_nodc(frequency, theta=0, bandwidth=1, gamma=1,
                      n_stds=3, offset=0):
"""
Return complex 2D Gabor filter kernel with no DC offset.
This function is a modification of the gabor_kernel function of scikit-image
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency.
gamma : float, optional
gamma changes the aspect ratio (ellipsoidal) of the gabor filter.
By default, gamma=1 which means no aspect ratio (circle)
if gamma>1, the filter is larger (x-dir)
if gamma<1, the filter is higher (y-dir)
This value is ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g_nodc : complex 2d array
A single gabor kernel (complex) with no DC offset
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
# set gaussian parameters
b = bandwidth
sigma_pref = 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * (2.0 ** b + 1) / (2.0 ** b - 1)
sigma_y = sigma_pref / frequency
sigma_x = sigma_y/gamma
# meshgrid
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
# rotation matrix
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
    # combine the Gaussian envelope and the complex harmonic
    g = np.zeros(y.shape, dtype=complex)
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y # gaussian envelope
    oscil = np.exp(1j * (2 * np.pi * frequency * rotx + offset))  # harmonic / oscillatory function
g_dc = g*oscil
# remove dc component by subtracting the envelope weighted by K
K = np.sum(g_dc)/np.sum(g)
g_nodc = g_dc - K*g
return g_nodc | f725561a1eb56b6e23c1046c33b0abec49201122 | 16,130 |
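A quick numerical check that the DC component really is removed (a sketch):

gk = gabor_kernel_nodc(frequency=0.2, theta=np.pi / 4, bandwidth=1.0)
print(abs(gk.sum()))  # ~0 by construction: sum(g_dc) - K * sum(g) cancels exactly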
def run_fn(fn_args: TrainerFnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
# get transform component output
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
# read input data
train_dataset = fn_args.data_accessor.tf_dataset_factory(
fn_args.train_files,
dataset_options.TensorFlowDatasetOptions(
batch_size=fn_args.custom_config["batch_size"],
),
tf_transform_output.transformed_metadata.schema,
)
eval_dataset = fn_args.data_accessor.tf_dataset_factory(
fn_args.eval_files,
dataset_options.TensorFlowDatasetOptions(
batch_size=fn_args.custom_config["batch_size"],
),
tf_transform_output.transformed_metadata.schema,
)
# instantiate model
model = build_model(
fn_args.custom_config["input_features"],
fn_args.custom_config["window_size"],
fn_args.custom_config["outer_units"],
fn_args.custom_config["inner_units"],
)
# tf callbacks for tensorboard
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir,
update_freq="batch",
)
# validation_data = list(eval_dataset.as_numpy_iterator())
# train model
model.fit(
train_dataset,
# train_dataset.as_numpy_iterator(),
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback],
)
# Build signatures
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def _serve_tf_examples_fn(**input_features):
# """Returns the output to be used in the serving signature."""
preprocessed_features = model.tft_layer(input_features)
autoencoded_features = model(preprocessed_features)
return {
**{
f"input_features::{f}": input_features[f] for f in input_features.keys()
},
**{
f"preprocessed_features::{f}": preprocessed_features[f]
for f in preprocessed_features.keys()
},
# Output tensor names are of the form:
# lstm_autoencoder_model/decoder/{feature_name}/Reshape_1:0
**{
f"output_features::{f.name.split('/')[2]}": f
for f in autoencoded_features
},
}
_input_tf_specs = {
f: tf.TensorSpec(
shape=[None, fn_args.custom_config["window_size"]], dtype=tf.float32, name=f
)
for f in fn_args.custom_config["input_features"]
}
signatures = {
"serving_default": _serve_tf_examples_fn.get_concrete_function(
**_input_tf_specs
)
}
# Save model (this is the effective output of this function)
model.save(fn_args.serving_model_dir, save_format="tf", signatures=signatures) | 15c8202ad6955052bbd1da2984aedb9887c390af | 16,131 |
def log_likelihood(X, Y, Z, data, boolean=True, **kwargs):
"""
Log likelihood ratio test for conditional independence. Also commonly known
as G-test, G-squared test or maximum likelihood statistical significance
test. Tests the null hypothesis that X is independent of Y given Zs.
Parameters
----------
X: int, string, hashable object
A variable name contained in the data set
Y: int, string, hashable object
A variable name contained in the data set, different from X
Z: list (array-like)
A list of variable names contained in the data set, different from X and Y.
This is the separating set that (potentially) makes X and Y independent.
Default: []
data: pandas.DataFrame
The dataset on which to test the independence condition.
boolean: bool
If boolean=True, an additional argument `significance_level` must
be specified. If p_value of the test is greater than equal to
`significance_level`, returns True. Otherwise returns False.
If boolean=False, returns the chi2 and p_value of the test.
Returns
-------
If boolean = False, Returns 3 values:
chi: float
        The chi-square test statistic.
p_value: float
The p_value, i.e. the probability of observing the computed chi-square
statistic (or an even higher value), given the null hypothesis
that X \u27C2 Y | Zs.
dof: int
The degrees of freedom of the test.
If boolean = True, returns:
independent: boolean
If the p_value of the test is greater than significance_level, returns True.
Else returns False.
References
----------
[1] https://en.wikipedia.org/wiki/G-test
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 4)), columns=list('ABCD'))
>>> data['E'] = data['A'] + data['B'] + data['C']
>>> log_likelihood(X='A', Y='C', Z=[], data=data, boolean=True, significance_level=0.05)
True
>>> log_likelihood(X='A', Y='B', Z=['D'], data=data, boolean=True, significance_level=0.05)
True
>>> log_likelihood(X='A', Y='B', Z=['D', 'E'], data=data, boolean=True, significance_level=0.05)
False
"""
return power_divergence(
X=X, Y=Y, Z=Z, data=data, boolean=boolean, lambda_="log-likelihood", **kwargs
) | 00493131d78506c5a6cbb9e04bda51b69f1a04ca | 16,132 |
def conjugada_matriz_vec(mat:list):
"""
    Function that computes the conjugate of a complex matrix or vector.
    :param mat: list representing the complex matrix or vector.
    :return: list representing the resulting matrix or vector.
"""
fila = len(mat)
columnas = len(mat[0])
resul = []
for i in range(fila):
resul.append([])
for j in range(columnas):
resul[i].append(conjugado_complejos(mat[i][j]))
return resul | ad883dae9161f4e60f933caf93703544e16bfb4d | 16,133 |
def features2matrix(feature_list):
"""
Args:
feature_list (list of Feature):
Returns:
(np.ndarray, list of str): matrix and list of key of features
"""
matrix = np.array([feature.values for feature in feature_list], dtype=float)
key_lst = [feature.key for feature in feature_list]
return matrix, key_lst | f60cdb904489cca3ab926dbc8d396804367e4a7a | 16,134 |
import os
import pandas as pd
def GenDataFrameFromPath(path, pattern='*.png', fs=False):
"""
    Generate a dataframe of all files in a directory whose names match the given pattern.
    use: GenDataFrameFromPath(path, pattern='*.png')
"""
fnpaths = list(path.glob(pattern))
df = pd.DataFrame(dict(zip(['fnpath'], [fnpaths])))
df['dir'] = df['fnpath'].apply(lambda x: x.parent)
df['fn'] = df['fnpath'].apply(lambda x: x.name)
if fs:
df['size'] = df['fnpath'].apply(lambda x: os.path.getsize(x))
return df | 899026131fe8eb18a2c5c6cf0df6a4ebfa287986 | 16,135 |
import re
def is_heading(line):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
has_cattle = re.search(r'steer?|hfrs?|calves|cows?|bulls?', line, re.IGNORECASE)
has_price = re.search(r'\$[0-9]+\.[0-9]{2}', line)
return bool(has_cattle) and not bool(has_price) | ccbc80f7db61f7ba82aa88e54112d1995d457764 | 16,136 |
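Two illustrative inputs (a sketch; the strings are made up, not taken from a real report):

print(is_heading("Feeder Steers Medium and Large 1"))      # True: mentions cattle, no price
print(is_heading("400-500 lbs steers  $150.00-$160.00"))   # False: a price line, not a heading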
def get_channel_messages(channel_id):
""" Holt fuer einen bestimmten Kanal die Nachrichten aus der Datenbank"""
session = get_cassandra_session()
future = session.execute_async("SELECT * FROM messages WHERE channel_id=%s", (channel_id,))
    try:
        rows = future.result()
    except Exception:
        log.exception("Failed to fetch messages for channel %s", channel_id)
        return jsonify({'error': 'could not fetch messages'}), 500
messages = []
for row in rows:
messages.append({
'channel_id': row.channel_id,
'message_id': row.message_id,
'author_id': row.author_id,
'message': row.message
})
return jsonify({'messages': messages}), 200 | 7a3821dd8e93c4d49dfeecea200a881fdcb3f1a4 | 16,137 |
def train(traj,
pol, targ_pol, qf, targ_qf,
optim_pol, optim_qf,
epoch, batch_size, # optimization hypers
tau, gamma, # advantage estimation
sampling,
):
"""
Train function for deep deterministic policy gradient
Parameters
----------
traj : Traj
Off policy trajectory.
pol : Pol
Policy.
targ_pol : Pol
Target Policy.
qf : SAVfunction
Q function.
targ_qf : SAVfunction
Target Q function.
optim_pol : torch.optim.Optimizer
Optimizer for Policy.
optim_qf : torch.optim.Optimizer
Optimizer for Q function.
epoch : int
Number of iteration.
batch_size : int
Number of batches.
tau : float
Target updating rate.
gamma : float
Discounting rate.
sampling : int
        Number of samples drawn when approximating the expectation.
Returns
-------
result_dict : dict
Dictionary which contains losses information.
"""
pol_losses = []
qf_losses = []
logger.log("Optimizing...")
for batch in traj.iterate(batch_size, epoch):
qf_bellman_loss = lf.bellman(
qf, targ_qf, targ_pol, batch, gamma, sampling=sampling)
optim_qf.zero_grad()
qf_bellman_loss.backward()
optim_qf.step()
pol_loss = lf.ag(pol, qf, batch, sampling)
optim_pol.zero_grad()
pol_loss.backward()
optim_pol.step()
for q, targ_q, p, targ_p in zip(qf.parameters(), targ_qf.parameters(), pol.parameters(), targ_pol.parameters()):
targ_p.detach().copy_((1 - tau) * targ_p.detach() + tau * p.detach())
targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
qf_losses.append(qf_bellman_loss.detach().cpu().numpy())
pol_losses.append(pol_loss.detach().cpu().numpy())
logger.log("Optimization finished!")
return dict(PolLoss=pol_losses,
QfLoss=qf_losses,
) | 14c09f3ce1f30366be3b8d0e0b965bdc1c677834 | 16,138 |
import os
import numpy as np
import rasterio
import rasterio.transform
from rasterio.enums import Resampling
def upsample_gtiff(files: list, scale: float) -> list:
"""
Performs array math to artificially increase the resolution of a geotiff. No interpolation of values. A scale
factor of X means that the length of a horizontal and vertical grid cell decreases by X. Be careful, increasing the
resolution by X increases the file size by ~X^2
Args:
files: A list of absolute paths to the appropriate type of files (even if len==1)
        scale: A positive number used as the multiplying factor to increase the resolution.
Returns:
list of paths to the geotiff files created
"""
# Read raster dimensions
raster_dim = rasterio.open(files[0])
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each resampled raster (east, south, west, north, width, height)
affine_resampled = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * scale, height * scale)
# keep track of the new files
new_files = []
# Resample each GeoTIFF
for file in files:
rio_obj = rasterio.open(file)
data = rio_obj.read(
out_shape=(int(rio_obj.height * scale), int(rio_obj.width * scale)),
resampling=Resampling.nearest
)
# Convert new resampled array from 3D to 2D
data = np.squeeze(data, axis=0)
# Specify the filepath of the resampled raster
new_filepath = os.path.splitext(file)[0] + '_upsampled.tiff'
new_files.append(new_filepath)
# Save the GeoTIFF
with rasterio.open(
new_filepath,
'w',
driver='GTiff',
height=data.shape[0],
width=data.shape[1],
count=1,
dtype=data.dtype,
nodata=np.nan,
crs=rio_obj.crs,
transform=affine_resampled,
) as dst:
dst.write(data, 1)
return new_files | 93e158d4beae9d4d179e9908e6dce639de1770d3 | 16,139 |
def merge_dicts(dict1, dict2):
""" _merge_dicts
Merges two dictionaries into one.
INPUTS
@dict1 [dict]: First dictionary to merge.
@dict2 [dict]: Second dictionary to merge.
RETURNS
@merged [dict]: Merged dictionary
"""
merged = {**dict1, **dict2}
return merged | 67e96ba9c9831e6e2aa4bbd6cd8b8d1d5edb93c4 | 16,140 |
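A quick sketch showing that keys present in both inputs take the value from dict2 (the later unpacking wins):

print(merge_dicts({"a": 1, "b": 2}, {"b": 3}))  # {'a': 1, 'b': 3}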
import pagure.api
import pagure.lib.query
def check_api_acls(acls, optional=False):
"""Checks if the user provided an API token with its request and if
this token allows the user to access the endpoint desired.
:arg acls: A list of access control
:arg optional: Only check the API token is valid. Skip the ACL validation.
"""
if authenticated():
return
flask.g.token = None
flask.g.fas_user = None
token = None
token_str = None
if "Authorization" in flask.request.headers:
authorization = flask.request.headers["Authorization"]
if "token" in authorization:
token_str = authorization.split("token", 1)[1].strip()
token_auth = False
error_msg = None
if token_str:
token = pagure.lib.query.get_api_token(flask.g.session, token_str)
if token:
if token.expired:
error_msg = "Expired token"
else:
flask.g.authenticated = True
# Some ACLs are required
if acls:
token_acls_set = set(token.acls_list)
needed_acls_set = set(acls or [])
overlap = token_acls_set.intersection(needed_acls_set)
# Our token has some of the required ACLs: auth successful
if overlap:
token_auth = True
flask.g.fas_user = token.user
# To get a token, in the `fas` auth user must have
# signed the CLA, so just set it to True
flask.g.fas_user.cla_done = True
flask.g.token = token
flask.g.authenticated = True
# Our token has none of the required ACLs -> auth fail
else:
error_msg = "Missing ACLs: %s" % ", ".join(
sorted(set(acls) - set(token.acls_list))
)
# No ACL required
else:
if optional:
token_auth = True
flask.g.fas_user = token.user
# To get a token, in the `fas` auth user must have
# signed the CLA, so just set it to True
flask.g.fas_user.cla_done = True
flask.g.token = token
flask.g.authenticated = True
else:
error_msg = "Invalid token"
elif optional:
return
else:
error_msg = "Invalid token"
if not token_auth:
output = {
"error_code": pagure.api.APIERROR.EINVALIDTOK.name,
"error": pagure.api.APIERROR.EINVALIDTOK.value,
"errors": error_msg,
}
jsonout = flask.jsonify(output)
jsonout.status_code = 401
return jsonout | 81d658036c5b31e3471e48ef44f4eb26e571c49a | 16,141 |
import os
def get_data():
"""
Return data files
:return:
"""
data = {}
for df in get_manifest():
d, f = os.path.split(df)
if d not in data:
data[d] = [df]
else:
data[d].append(df)
return list(data.items()) | 3add894e03e153dc82aebcaca3a0099ac98b0a1c | 16,142 |
def china_province_head_fifteen():
"""
各省前15数据
:return:
"""
return db_request_service.get_china_province_head_fifteen(ChinaTotal, ChinaProvince) | 18dc3f22c05b3580bcd983361efc03bd3cdae43b | 16,143 |
import tempfile
import os
from datetime import datetime
import glob
import shutil
def exec_sedml_docs_in_archive(sed_doc_executer, archive_filename, out_dir, apply_xml_model_changes=False,
sed_doc_executer_supported_features=(Task, Report, DataSet, Plot2D, Curve, Plot3D, Surface),
report_formats=None, plot_formats=None,
bundle_outputs=None, keep_individual_outputs=None,
sed_doc_executer_logged_features=(Task, Report, DataSet, Plot2D, Curve, Plot3D, Surface)):
""" Execute the SED-ML files in a COMBINE/OMEX archive (execute tasks and save outputs)
Args:
sed_doc_executer (:obj:`types.FunctionType`): function to execute each SED document in the archive.
The function must implement the following interface::
def sed_doc_executer(doc, working_dir, base_out_path, rel_out_path=None,
apply_xml_model_changes=False, report_formats=None, plot_formats=None,
log=None, indent=0):
''' Execute the tasks specified in a SED document and generate the specified outputs
Args:
doc (:obj:`SedDocument` of :obj:`str`): SED document or a path to SED-ML file which defines a SED document
working_dir (:obj:`str`): working directory of the SED document (path relative to which models are located)
out_path (:obj:`str`): path to store the outputs
* CSV: directory in which to save outputs to files
``{out_path}/{rel_out_path}/{report.id}.csv``
* HDF5: directory in which to save a single HDF5 file (``{out_path}/reports.h5``),
with reports at keys ``{rel_out_path}/{report.id}`` within the HDF5 file
rel_out_path (:obj:`str`, optional): path relative to :obj:`out_path` to store the outputs
apply_xml_model_changes (:obj:`bool`, optional): if :obj:`True`, apply any model changes specified in the SED-ML file
report_formats (:obj:`list` of :obj:`ReportFormat`, optional): report format (e.g., csv or h5)
plot_formats (:obj:`list` of :obj:`PlotFormat`, optional): plot format (e.g., pdf)
log (:obj:`SedDocumentLog`, optional): execution status of document
indent (:obj:`int`, optional): degree to indent status messages
'''
archive_filename (:obj:`str`): path to COMBINE/OMEX archive
out_dir (:obj:`str`): path to store the outputs of the archive
* CSV: directory in which to save outputs to files
``{ out_dir }/{ relative-path-to-SED-ML-file-within-archive }/{ report.id }.csv``
* HDF5: directory in which to save a single HDF5 file (``{ out_dir }/reports.h5``),
with reports at keys ``{ relative-path-to-SED-ML-file-within-archive }/{ report.id }`` within the HDF5 file
apply_xml_model_changes (:obj:`bool`): if :obj:`True`, apply any model changes specified in the SED-ML files before
calling :obj:`task_executer`.
sed_doc_executer_supported_features (:obj:`list` of :obj:`type`, optional): list of the types of elements that the
SED document executer supports. Default: tasks, reports, plots, data sets, curves, and surfaces.
report_formats (:obj:`list` of :obj:`ReportFormat`, optional): report format (e.g., csv or h5)
plot_formats (:obj:`list` of :obj:`PlotFormat`, optional): report format (e.g., pdf)
bundle_outputs (:obj:`bool`, optional): if :obj:`True`, bundle outputs into archives for reports and plots
keep_individual_outputs (:obj:`bool`, optional): if :obj:`True`, keep individual output files
sed_doc_executer_logged_features (:obj:`list` of :obj:`type`, optional): list of the types fo elements which that
the SED document executer logs. Default: tasks, reports, plots, data sets, curves, and surfaces.
Returns:
:obj:`CombineArchiveLog`: log
"""
config = get_config()
# process arguments
if report_formats is None:
report_formats = [ReportFormat(format_value) for format_value in config.REPORT_FORMATS]
if plot_formats is None:
plot_formats = [PlotFormat(format_value) for format_value in config.PLOT_FORMATS]
if bundle_outputs is None:
bundle_outputs = config.BUNDLE_OUTPUTS
if keep_individual_outputs is None:
keep_individual_outputs = config.KEEP_INDIVIDUAL_OUTPUTS
verbose = config.VERBOSE
# create temporary directory to unpack archive
archive_tmp_dir = tempfile.mkdtemp()
# unpack archive and read metadata
archive = CombineArchiveReader.run(archive_filename, archive_tmp_dir)
# determine files to execute
sedml_contents = get_sedml_contents(archive)
if not sedml_contents:
warn("COMBINE/OMEX archive '{}' does not contain any executing SED-ML files".format(archive_filename), NoSedmlWarning)
# print summary of SED documents
print(get_summary_sedml_contents(archive, archive_tmp_dir))
# create output directory
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# initialize status and output
supported_features = sed_doc_executer_supported_features
logged_features = sed_doc_executer_logged_features
if SedDocument not in supported_features:
supported_features = tuple(list(supported_features) + [SedDocument])
if SedDocument not in logged_features:
logged_features = tuple(list(logged_features) + [SedDocument])
log = init_combine_archive_log(archive, archive_tmp_dir,
supported_features=supported_features,
logged_features=logged_features)
log.status = Status.RUNNING
log.out_dir = out_dir
log.export()
start_time = datetime.datetime.now()
# execute SED-ML files: execute tasks and save output
exceptions = []
for i_content, content in enumerate(sedml_contents):
content_filename = os.path.join(archive_tmp_dir, content.location)
content_id = os.path.relpath(content_filename, archive_tmp_dir)
print('Executing SED-ML file {}: {} ...'.format(i_content, content_id))
doc_log = log.sed_documents[content_id]
doc_log.status = Status.RUNNING
doc_log.export()
with capturer.CaptureOutput(merged=True, relay=verbose) as captured:
doc_start_time = datetime.datetime.now()
try:
working_dir = os.path.dirname(content_filename)
sed_doc_executer(content_filename,
working_dir,
out_dir,
os.path.relpath(content_filename, archive_tmp_dir),
apply_xml_model_changes=apply_xml_model_changes,
report_formats=report_formats,
plot_formats=plot_formats,
log=doc_log,
indent=1)
doc_log.status = Status.SUCCEEDED
except Exception as exception:
exceptions.append(exception)
doc_log.status = Status.FAILED
doc_log.exception = exception
# update status
doc_log.output = captured.get_bytes().decode()
doc_log.duration = (datetime.datetime.now() - doc_start_time).total_seconds()
doc_log.export()
print('')
if bundle_outputs:
print('Bundling outputs ...')
# bundle CSV files of reports into zip archive
archive_paths = [os.path.join(out_dir, '**', '*.' + format.value) for format in report_formats if format != ReportFormat.h5]
archive = build_archive_from_paths(archive_paths, out_dir)
if archive.files:
ArchiveWriter().run(archive, os.path.join(out_dir, config.REPORTS_PATH))
# bundle PDF files of plots into zip archive
archive_paths = [os.path.join(out_dir, '**', '*.' + format.value) for format in plot_formats]
archive = build_archive_from_paths(archive_paths, out_dir)
if archive.files:
ArchiveWriter().run(archive, os.path.join(out_dir, config.PLOTS_PATH))
# cleanup temporary files
print('Cleaning up ...')
if not keep_individual_outputs:
path_patterns = (
[os.path.join(out_dir, '**', '*.' + format.value) for format in report_formats if format != ReportFormat.h5]
+ [os.path.join(out_dir, '**', '*.' + format.value) for format in plot_formats]
)
for path_pattern in path_patterns:
for path in glob.glob(path_pattern, recursive=True):
os.remove(path)
for dir_path, dir_names, file_names in os.walk(out_dir, topdown=False):
for dir_name in list(dir_names):
full_dir_name = os.path.join(dir_path, dir_name)
if not os.path.isdir(full_dir_name):
dir_names.remove(dir_name)
elif not os.listdir(full_dir_name):
# not reachable because directory would
# have already been removed by the iteration for the directory
shutil.rmtree(full_dir_name) # pragma: no cover
dir_names.remove(dir_name) # pragma: no cover
if not dir_names and not file_names:
shutil.rmtree(dir_path)
shutil.rmtree(archive_tmp_dir)
# update status
log.status = Status.FAILED if exceptions else Status.SUCCEEDED
log.duration = (datetime.datetime.now() - start_time).total_seconds()
log.finalize()
log.export()
# summarize execution
print('')
print('============= SUMMARY =============')
print(get_summary_combine_archive_log(log))
# raise exceptions
if exceptions:
msg = 'The COMBINE/OMEX did not execute successfully:\n\n {}'.format(
'\n\n '.join(str(exceptions).replace('\n', '\n ') for exceptions in exceptions))
raise CombineArchiveExecutionError(msg)
# return log
return log | 508ac3f83e7365c58f830081bf16470e0ad43e43 | 16,144 |
def pipeline(x_train,
y_train,
x_test,
y_test,
param_dict=None,
problem='classification'):
"""Trains and evaluates a DNN classifier.
Args:
x_train: np.array or scipy.sparse.*matrix array of features of training data
y_train: np.array 1-D array of class labels of training data
x_test: np.array or scipy.sparse.*matrix array of features of test data
y_test: np.array 1-D array of class labels of the test data
param_dict: {string: ?} dictionary of parameters of their values
problem: string type of learning problem; values = 'classification',
'regression'
Returns:
model: Keras.models.Model
trained Keras model
metrics: {str: float}
dictionary of metric scores
"""
assert problem in ['classification', 'regression']
if param_dict is None:
param_dict = {'epochs': 10, 'batch_size': 256}
num_feature = x_train.shape[1]
is_sparse = sparse.issparse(x_train)
param_dict = param_dict.copy()
num_epoch = param_dict.pop('epochs')
batch_size = param_dict.pop('batch_size')
if problem == 'regression':
num_output = 1
loss = 'mean_squared_error'
model_init = KerasRegressor
else:
num_output = len(set(y_train))
loss = 'categorical_crossentropy'
model_init = FunctionalKerasClassifier
build_fn = pseudo_partial(
keras_build_fn,
num_feature=num_feature,
num_output=num_output,
is_sparse=is_sparse,
loss=loss,
**param_dict)
model = model_init(
build_fn=build_fn,
epochs=num_epoch,
batch_size=batch_size,
shuffle=True,
verbose=False)
return generic_pipeline(
model, x_train, y_train, x_test, y_test, problem=problem) | f01e20851c91dd9f6b3db889fdf713edc1eb37b9 | 16,145 |
def model_selection(modelname, num_out_classes,
dropout=None):
"""
:param modelname:
:return: model, image size, pretraining<yes/no>, input_list
"""
if modelname == 'xception':
return TransferModel(modelchoice='xception',
num_out_classes=num_out_classes)
# , 299, \True, ['image'], None
elif modelname == 'resnet18':
return TransferModel(modelchoice='resnet18', dropout=dropout,
num_out_classes=num_out_classes)
# , \224, True, ['image'], None
elif modelname == 'xception_concat':
return TransferModel(modelchoice='xception_concat',
num_out_classes=num_out_classes)
else:
raise NotImplementedError(modelname) | 67ba26ab4f7cbe8f4540eb10f2ef6e598b49ea2f | 16,146 |
def Packet_computeBinaryPacketLength(startOfPossibleBinaryPacket):
"""Packet_computeBinaryPacketLength(char const * startOfPossibleBinaryPacket) -> size_t"""
return _libvncxx.Packet_computeBinaryPacketLength(startOfPossibleBinaryPacket) | 58139b8d874d9292e63b6eb6afdbd9c5c2fa6f9d | 16,147 |
def build_check_query(check_action: Action) -> str:
"""Builds check query from action item
Parameters
----------
check_action : action
check action to build query from
Returns
-------
str
query to execute
"""
return f"""
UPDATE todos
SET completed = 1
WHERE name LIKE '{check_action.pattern}';
""" | a6b8f5b328e3bb9eedf61a325e54d6cae9704a55 | 16,148 |
import itertools
def gen_positions(n, n_boulders):
"""Generates state codes for boulders. Includes empty rows
Parameters:
n: number of rows/columns
n_boulders: number of boulders per row
return value:
Possible boulder and alien states
"""
alien_positions_with_0=["{}1{}".format('0'*(n-i-1),'0'*(i)) for i in range(n)]+['0'*n]
if n_boulders==1:
return alien_positions_with_0, alien_positions_with_0[0:n]
else:
positions=[]
position_index=list(itertools.combinations(range(n), n_boulders))
for tup in position_index:
pos=''
for i in range(n):
if i in tup:
pos+='1'
else:
pos+='0'
positions.append(pos)
        if '0'*n not in positions:
            positions.append('0'*n)
return positions, alien_positions_with_0[0:n] | 0a20594f2e021bf8e190f6c7c726159fde0b8367 | 16,149 |
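# Illustrative usage (added for clarity, not part of the original snippet):
#     >>> gen_positions(3, 2)
#     (['110', '101', '011', '000'], ['001', '010', '100'])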
def analysis_linear_correlation(data1:np.array,
data2:np.array,
alpha:float = .05,
return_corr:bool = True,
verbose:bool = False)->bool:
"""
## Linear correlation analysis to test independence for numerical / ordinal variables.
data1, date2 -- 1D data to be tested.
alpha -- Significance level (default, 0.05).
return_corr -- If is True, return correlation value and his p-value (default, False).
verbose -- Display extra information (default, False).
return -- boolean according test result.
"""
# get types
type1 = data1.dtype
type2 = data2.dtype
# get size
n = len(data1)
# ord - ord
if type1 == "int64" and type2 == "int64":
# number of categories
ncat1 = len(np.unique(data1))
ncat2 = len(np.unique(data2))
# analysis
if ncat1 >= 5 and ncat2 >= 5:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_kendalltau(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# num - num
if type1 == "float64" and type2 == "float64":
# test if variables are gaussian
if n >= 5000:
is_normal1 = test_anderson(data1, alpha = alpha)
is_normal2 = test_anderson(data2, alpha = alpha)
else:
is_normal1 = test_shapiro(data1, alpha = alpha)
is_normal2 = test_shapiro(data2, alpha = alpha)
# analysis
if n >= 100:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
if is_normal1 and is_normal2:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# num - ord
if (type1 == "float64" and type2 == "int64") or (type1 == "int64" and type2 == "float64"):
# number of categories
if type1 == "int64":
ncat = len(np.unique(data1))
else:
ncat = len(np.unique(data2))
# analysis
if ncat < 5:
result = correlation_kendalltau(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
if n >= 100:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# return
return result | 6eaf34a12281949236d28143399024ed30e834ad | 16,150 |
def sha256(buffer=None):
"""Secure Hash Algorithm 2 (SHA-2) with 256 bits hash value."""
return Hash("sha256", buffer) | 2e33c38c0f7b9dd019104a18e6842243773686ca | 16,151 |
import math
import torch

# imports needed by this snippet; lexpand is assumed to come from pyro.contrib.util
import pyro
import pyro.poutine as poutine
from pyro.contrib.util import lexpand
def nmc_eig(model, design, observation_labels, target_labels=None,
N=100, M=10, M_prime=None, independent_priors=False):
"""
Nested Monte Carlo estimate of the expected information
gain (EIG). The estimate is, when there are not any random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log p(y_n | \\theta_n, d) -
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^M p(y_n | \\theta_m, d)\\right)
The estimate is, in the presence of random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M'}\\sum_{m=1}^{M'}
p(y_n | \\theta_n, \\widetilde{\\theta}_{nm}, d)\\right)-
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^{M}
p(y_n | \\theta_m, \\widetilde{\\theta}_{m}, d)\\right)
The latter form is used when `M_prime != None`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int N: Number of outer expectation samples.
:param int M: Number of inner expectation samples for `p(y|d)`.
:param int M_prime: Number of samples for `p(y | theta, d)` if required.
:param bool independent_priors: Only used when `M_prime` is not `None`. Indicates whether the prior distributions
for the target variables and the nuisance variables are independent. In this case, it is not necessary to
sample the targets conditional on the nuisance variables.
:return: EIG estimate
:rtype: `torch.Tensor`
"""
    if isinstance(observation_labels, str):  # allow a single string in place of a list of strings
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
# Take N samples of the model
expanded_design = lexpand(design, N) # N copies of the model
trace = poutine.trace(model).get_trace(expanded_design)
trace.compute_log_prob()
if M_prime is not None:
y_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in observation_labels}
theta_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in target_labels}
theta_dict.update(y_dict)
# Resample M values of u and compute conditional probabilities
# WARNING: currently the use of condition does not actually sample
# the conditional distribution!
# We need to use some importance weighting
conditional_model = pyro.condition(model, data=theta_dict)
if independent_priors:
reexpanded_design = lexpand(design, M_prime, 1)
else:
# Not acceptable to use (M_prime, 1) here - other variables may occur after
# theta, so need to be sampled conditional upon it
reexpanded_design = lexpand(design, M_prime, N)
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
conditional_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M_prime)
else:
# This assumes that y are independent conditional on theta
# Furthermore assume that there are no other variables besides theta
conditional_lp = sum(trace.nodes[l]["log_prob"] for l in observation_labels)
y_dict = {l: lexpand(trace.nodes[l]["value"], M) for l in observation_labels}
# Resample M values of theta and compute conditional probabilities
conditional_model = pyro.condition(model, data=y_dict)
# Using (M, 1) instead of (M, N) - acceptable to re-use thetas between ys because
# theta comes before y in graphical model
reexpanded_design = lexpand(design, M, 1) # sample M theta
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
marginal_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M)
terms = conditional_lp - marginal_lp
nonnan = (~torch.isnan(terms)).sum(0).type_as(terms)
terms[torch.isnan(terms)] = 0.
return terms.sum(0)/nonnan | 8de69e87677a4a74fd04ce4cf302221121d00b2d | 16,152 |
import numpy as np
import scipy.stats
def _orient_eigs(eigvecs, phasing_track, corr_metric=None):
"""
Orient each eigenvector deterministically according to the orientation
that correlates better with the phasing track.
Parameters
----------
eigvecs : 2D array (n, k)
`k` eigenvectors (as columns).
phasing_track : 1D array (n,)
Reference track for determining orientation.
corr_metric: spearmanr, pearsonr, var_explained, MAD_explained
Correlation metric to use for selecting orientations.
Returns
-------
2D array (n, k)
Reoriented `k` eigenvectors.
Notes
-----
This function does NOT change the order of the eigenvectors.
"""
for i in range(eigvecs.shape[1]):
mask = np.isfinite(eigvecs[:, i]) & np.isfinite(phasing_track)
if corr_metric is None or corr_metric == "spearmanr":
corr = scipy.stats.spearmanr(phasing_track[mask], eigvecs[mask, i])[0]
elif corr_metric == "pearsonr":
corr = scipy.stats.pearsonr(phasing_track[mask], eigvecs[mask, i])[0]
elif corr_metric == "var_explained":
corr = scipy.stats.pearsonr(phasing_track[mask], eigvecs[mask, i])[0]
# multiply by the sign to keep the phasing information
corr = np.sign(corr) * corr * corr * np.var(eigvecs[mask, i])
elif corr_metric == "MAD_explained":
corr = (
numutils.COMED(phasing_track[mask], eigvecs[mask, i]) *
numutils.MAD(eigvecs[mask, i])
)
else:
raise ValueError("Unknown correlation metric: {}".format(corr_metric))
eigvecs[:, i] = np.sign(corr) * eigvecs[:, i]
return eigvecs | d6feebbd7b7748549ebc494bf8b00f0d9e313f7c | 16,153 |
def test_CreativeProject_auto_multivariate_functional(max_iter, max_response, error_lim, model_type):
"""
test that auto method works for a particular multivariate (bivariate) function
"""
# define data
covars = [(0.5, 0, 1), (0.5, 0, 1)] # covariates come as a list of tuples (one per covariate: (<initial_guess>, <min>, <max>))
# define response function
def f(x):
return (-(6 * x['covar0'].iloc[0] - 2) ** 2 * np.sin(12 * x['covar0'].iloc[0] - 4)) * (-(6 * x['covar1'].iloc[0] - 2) ** 2 * np.sin(12 * x['covar1'].iloc[0] - 4))
# initialize class instance
cc = TuneSession(covars=covars, model=model_type)
# run the auto-method
cc.auto(response_samp_func=f, max_iter=max_iter)
# assert that max_iter steps taken by optimizer
assert cc.model["covars_sampled_iter"] == max_iter
assert cc.model["covars_proposed_iter"] == max_iter
assert cc.model["response_sampled_iter"] == max_iter
# assert that training and test data is stored
assert cc.train_X.shape[0] == max_iter
assert cc.proposed_X.shape[0] == max_iter
assert cc.train_X.shape[0] == max_iter
assert cc.train_X.shape[1] == 2 # check that it's bivariate train_X
# assert that best response is stored at each step
assert cc.covars_best_response_value.shape[0] == max_iter
assert cc.best_response_value.shape[0] == max_iter
# assert that the correct maximum and covariate values for that spot are identified
THEORETICAL_MAX_COVAR = 1.0
for it in range(len(covars)):
assert abs(cc.covars_best_response_value[-1, it].item() - THEORETICAL_MAX_COVAR)/THEORETICAL_MAX_COVAR \
< error_lim
assert abs(cc.best_response_value[-1].item() - max_response)/max_response < error_lim | ee0cc1d34a1836c8ea9ec2b23de175f4b6d8ca75 | 16,154 |
import numpy as np
# KFold assumed from scikit-learn, matching the n_splits/shuffle/random_state usage below
from sklearn.model_selection import KFold


def crossValidate(x, y, cv=5, K=None):
"""
:param y: N*L ranking vectors
:return:
"""
results = {"perf": []}
## cross validation ##
np.random.seed(1100)
kf = KFold(n_splits=cv, shuffle=True, random_state=0)
for train, test in kf.split(x):
x_train = x[train, :]
y_train = y[train, :]
x_test = x[test, :]
y_test = y[test, :]
# y_pred = KNN(K=K).fit(x_train, y_train).predict(x_test)
y_pred = multithreadPredict(x_test, KNN(K=K).fit(x_train, y_train))
        print(y_pred)
# print y_pred ### test
results["perf"].append(perfMeasure(y_pred, y_test, rankopt=True))
# print results["perf"][-1]
for key in results.keys():
item = np.array(results[key])
mean = np.nanmean(item, axis=0)
std = np.nanstd(item, axis=0)
results[key] = [mean, std]
return results | 820f5b53a38d2a64a3a1ee740d0fded020000bb7 | 16,155 |
def make_word_groups(vocab_words):
"""
:param vocab_words: list of vocabulary words with a prefix.
:return: str of prefix followed by vocabulary words with
prefix applied, separated by ' :: '.
This function takes a `vocab_words` list and returns a string
with the prefix and the words with prefix applied, separated
by ' :: '.
"""
    prefix = vocab_words[0]
    new_list = [prefix] + [prefix + word for word in vocab_words[1:]]
    return " :: ".join(new_list)
def gpi_g10s40(rescale=False):
"""
Multiply by the 'rescale' factor to adjust hole sizes and centers in entrance pupil (PM)
(Magnify the physical mask coordinates up to the primary mirror size)
"""
demag = gpi_mag_asdesigned()
if rescale:
demag = demag/rescale # rescale 1.1 gives a bigger mask in PM pupil space
print ("gpi_g10s4...")
hdia, ctrs = gpi_g10s40_asmanufactured(1.0/demag) # meters
return hdia, ctrs
""" From GPI FPRD 2008 http://dms.hia.nrc.ca/view.php?fDocumentId=1398
Filter 1/2 pwr bandwidth
name wavelen/um %
Y 0.95-1.14 18
J 1.12-1.35 19
H 1.50-1.80 18
K1 1.9-2.19 14
K2 2.13-2.4 12
Spectral Resolution 34-36 35-39 44-49 62-70 75-83
# spectral pixels 12-13 13-15 16-18 18-20
18-20
pixels 14mas are nyquist at 1.1
""" | 4be151f7e99332be0f67d00619fe75def90c2b5d | 16,157 |
import inspect
def test_close_sections():
"""Parse sections without blank lines in between."""
def f(x, y, z):
"""
Parameters
----------
x :
X
y :
Y
z :
Z
Raises
------
Error2
error.
Error1
error.
Returns
-------
str
value
"""
return x + y + z
sections, errors = parse(inspect.getdoc(f), inspect.signature(f))
assert len(sections) == 3
assert not errors | 793d92d989f3caa020c06ea798f7f34703abd747 | 16,158 |
import sys
def get_good_contours(proc_image, image, bb, savedir, max_num_add=None):
"""
Adapted from `click_and_crop_v3.py`, except that we have to make the contours.
Here, we're going to inspect and check that the contours are reasonable.
Returns a list of processed contours that I'll then use for later.
"""
cv2.imshow("Now detecting contours for this image.", proc_image)
key = cv2.waitKey(0)
if key in utils.ESC_KEYS:
sys.exit()
(cnts, _) = cv2.findContours(proc_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
processed = []
for c in cnts:
try:
# Find the centroids of the contours in _pixel_space_. :)
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
if utils.filter_point(cX,cY,xlower=bb[0],xupper=bb[0]+bb[2],ylower=bb[1],yupper=bb[1]+bb[3]):
continue
# Now fit an ellipse!
ellipse = cv2.fitEllipse(c)
cv2.ellipse(image, ellipse, (0,255,0), 2)
name = "Is this ellipse good? ESC to skip it, else add it."
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(name, 2000, 4000)
cv2.imshow(name, image)
firstkey = cv2.waitKey(0)
if firstkey not in utils.ESC_KEYS:
angle = ellipse[2]
yaw = utils.opencv_ellipse_angle_to_robot_yaw(angle)
processed.append( (cX,cY,angle,yaw) )
cv2.circle(img=image, center=(cX,cY), radius=5, color=(0,0,255), thickness=-1)
cv2.putText(img=image,
text="{},{:.1f}".format(len(processed), angle),
org=(cX,cY),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(255,0,0),
thickness=2)
if (max_num_add is not None) and (len(processed) == max_num_add):
break
except:
pass
assert len(processed) >= 1
cv2.destroyAllWindows()
# Save images for debugging. Then return the processed list.
cv2.imshow("FINAL IMAGE before saving (PRESS ESC IF BAD).", image)
key = cv2.waitKey(0)
if key in utils.ESC_KEYS:
sys.exit()
cv2.imwrite(savedir, image)
return processed | 4437c44c249b1bd4bf6bcef90d38defdcbe9bc48 | 16,159 |
def _int_converter(value):
"""Convert string value to int.
We do not use the int converter default exception since we want to make
sure the exact http response code.
Raises: exception_handler.BadRequest if value can not be parsed to int.
Examples:
/<request_path>?count=10 parsed to {'count': '10'} and it should be
converted to {'count': 10}.
"""
try:
return int(value)
except Exception:
raise exception_handler.BadRequest(
'%r type is not int' % value
) | 6b5c99635211bf8ce2e3c2adc784f2a4e9ee355f | 16,160 |
from typing import List
def split_rule(rules, rule_name, symbols_to_extract: List[str], subrule_name: str):
"""
Let only options which are starting with symbols from symbols_to_extract.
Put the rest to a subrule.
"""
r = rule_by_name(rules, rule_name)
assert isinstance(r.body, Antlr4Selection), r
sub_options = Antlr4Selection([])
for o in r.body:
start_symbols = set()
_direct_left_corner(o, start_symbols, allow_eps_in_sel=True)
if not start_symbols.intersection(symbols_to_extract):
sub_options.append(o)
r.body = Antlr4Selection([o for o in r.body if not (o in sub_options)])
r.body.insert(0, Antlr4Symbol(subrule_name, False))
if len(r.body) == 1:
r.body = r.body[0]
assert len(sub_options) > 0
if len(sub_options) == 1:
sub_options = sub_options[0]
else:
sub_options = Antlr4Selection(sub_options)
sub_r = Antlr4Rule(subrule_name, sub_options)
rules.insert(rules.index(r), sub_r)
return sub_r | aa4d2aac62c488e3cd8d002556edea3aaef7185b | 16,161 |
import numpy as np
from scipy.special import logsumexp


def ESS(works_prev, works_incremental):
"""
compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.
Parameters
----------
works_prev: np.array
np.array of floats representing the accumulated works at t-1 (unnormalized)
works_incremental: np.array
np.array of floats representing the incremental works at t (unnormalized)
Returns
-------
normalized_ESS: float
effective sample size
"""
prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
incremental_weights_unnormalized = np.exp(-works_incremental)
ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
normalized_ESS = ESS / len(prev_weights_normalized)
assert normalized_ESS >= 0.0 - DISTRIBUTED_ERROR_TOLERANCE and normalized_ESS <= 1.0 + DISTRIBUTED_ERROR_TOLERANCE, f"the normalized ESS ({normalized_ESS} is not between 0 and 1)"
return normalized_ESS | 514ca2462708a4c163f45e92854159d50eb5f3a8 | 16,162 |
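# Illustrative usage (added sketch, not part of the original snippet; assumes the
# module-level constant DISTRIBUTED_ERROR_TOLERANCE used in the assert is defined):
#     >>> round(ESS(np.zeros(4), np.zeros(4)), 6)  # identical weights -> maximal normalized ESS
#     1.0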
def new_line_over():
"""Creates a new line over the cursor.
The cursor is also moved to the beginning of the new line. It is
not possible to create more than one new line over the cursor
at a time for now.
Usage:
`In a config file:`
.. code-block:: yaml
- new_line_over:
`Using the API:`
.. code-block:: python
ezvi.tools.new_line_over()
:rtype: str
:return: Characters that would be used in ``Vi`` to add a new line
over the cursor.
"""
to_write = "O" + ESCAPE
return to_write | 41da4d301240a8ea3d9108dd1d957a30cff1097b | 16,163 |
import json
def lambda_handler(event, context):
"""Calls custom job waiter developed by user
Arguments:
event {dict} -- Dictionary with details on previous processing step
context {dict} -- Dictionary with details on Lambda context
Returns:
{dict} -- Dictionary with Processed Bucket, Key(s) and Job Details
"""
try:
logger.info("Lambda event is [{}]".format(event))
logger.info(event["body"])
source_bucket = event["body"]["bucket"]
job_name = event["body"]["targetJob"]
ddb_table = event["body"]["targetDDBTable"]
token = event["body"]["token"]
s3_prefix_key_proc = event["body"]["keysRawProc"]
logger.info(
"[{}] [{}] [{}] [{}]".format(
source_bucket,
s3_prefix_key_proc,
job_name,
ddb_table,
)
)
# Submitting a new Glue Job
job_response = client.start_job_run(
JobName=job_name,
Arguments={
# Specify any arguments needed based on bucket and keys (e.g. input/output S3 locations)
"--job-bookmark-option": "job-bookmark-enable",
"--additional-python-modules": "pyarrow==2,awswrangler==2.9.0",
# Custom arguments below
"--TARGET_DDB_TABLE": ddb_table,
"--S3_BUCKET": source_bucket,
"--S3_PREFIX_PROCESSED": s3_prefix_key_proc[0]
#
},
MaxCapacity=2.0,
)
logger.info("Response is [{}]".format(job_response))
# Collecting details about Glue Job after submission (e.g. jobRunId for Glue)
json_data = json.loads(json.dumps(job_response, default=datetimeconverter))
job_details = {
"jobName": job_name,
"jobRunId": json_data.get("JobRunId"),
"jobStatus": "STARTED",
"token": token,
}
response = {"jobDetails": job_details}
except Exception as e:
logger.error("Fatal error", exc_info=True)
sagemaker.send_pipeline_execution_step_failure(
CallbackToken=token, FailureReason="error"
)
raise e
return response | e5a4055a39d0df1fabd3ad5f70a2859524378f44 | 16,164 |
def put_path(components, value):
"""Recursive function to put value in component"""
if len(components) > 1:
new = components.pop(0)
value = put_path(components, value)
else:
new = components[0]
return {new: value} | 77db4064a77cf1cdcde1d74d901410525722b66e | 16,165 |
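# Illustrative usage (added for clarity, not part of the original snippet; note
# that the recursion consumes the `components` list via pop(0)):
#     >>> put_path(['a', 'b', 'c'], 5)
#     {'a': {'b': {'c': 5}}}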
import numpy as np
from scipy import sparse


def con_orthogonal_checkboard(X,c_v1,c_v2,c_v3,c_v4,num,N):
"""for principal / isothermic / developable mesh / aux_diamond / aux_cmc
(v1-v3)*(v2-v4)=0
"""
col = np.r_[c_v1,c_v2,c_v3,c_v4]
row = np.tile(np.arange(num),12)
d1 = X[c_v2]-X[c_v4]
d2 = X[c_v1]-X[c_v3]
d3 = X[c_v4]-X[c_v2]
d4 = X[c_v3]-X[c_v1]
data = np.r_[d1,d2,d3,d4]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.einsum('ij,ij->i',d1.reshape(-1,3, order='F'),d2.reshape(-1,3, order='F'))
return H,r | f05228d6caa49f60a2a9f515ce5590e6f13127e0 | 16,166 |
def _PropertyGridInterface_GetPropertyValues(self, dict_=None, as_strings=False, inc_attributes=False):
"""
Returns all property values in the grid.
    :param `dict_`: A dictionary to fill with the property values. If not given,
        then a new one is created. The dict_ can be an object as well,
        in which case its __dict__ is used.
:param `as_strings`: if True, then string representations of values
are fetched instead of native types. Useful for config and such.
:param `inc_attributes`: if True, then property attributes are added
in the form of "@<propname>@<attr>".
:returns: A dictionary with values. It is always a dictionary,
        so if dict_ was an object with a __dict__ attribute, then that
attribute is returned.
"""
if dict_ is None:
dict_ = {}
elif hasattr(dict_,'__dict__'):
dict_ = dict_.__dict__
getter = self.GetPropertyValue if not as_strings else self.GetPropertyValueAsString
it = self.GetVIterator(PG_ITERATE_PROPERTIES)
while not it.AtEnd():
p = it.GetProperty()
name = p.GetName()
dict_[name] = getter(p)
if inc_attributes:
attrs = p.GetAttributes()
if attrs and len(attrs):
dict_['@%s@attr'%name] = attrs
it.Next()
return dict_ | 06974bec88351d5e8743b43e7c0495bb40545ef0 | 16,167 |
def get_pipelines(exp_type, cal_ver=None, context=None):
"""Given `exp_type` and `cal_ver` and `context`, locate the appropriate SYSTEM CRDSCFG
reference file and determine the sequence of pipeline .cfgs required to process that
exp_type.
"""
context = _get_missing_context(context)
cal_ver = _get_missing_calver(cal_ver)
with log.augment_exception("Failed determining required pipeline .cfgs for",
"EXP_TYPE", srepr(exp_type), "CAL_VER", srepr(cal_ver)):
config_manager = _get_config_manager(context, cal_ver)
return config_manager.exptype_to_pipelines(exp_type) | 7fb4a02ffe7598df4621b2fd4a6863094616fd41 | 16,168 |
def distance_to_line(p,a,b):
"""
Computes the perpendicular distance from a point to an infinite line.
Parameters
----------
p : (x,y)
Coordinates of a point.
a : (x,y)
Coordinates of a point on a line.
b : (x,y)
Coordinates of another point on a line.
Returns
----------
float
The Euclidean distance from p to the infinite line through a & b.
"""
# code by BJK
# area of triangle formed between point and line segment
trianglearea=abs(area([a,b,p]))
# length of line segment
line_length=distance(a,b)
# make sure line segment has a length
if line_length==0:
# a & b are the same, so just calculate distance between points
return distance(p,a)
else:
# the distance we want is the height of the triangle
# area is 1/2 base x height so height is 2*area/base
return 2*trianglearea/line_length | 1b1d0ef37587cd8cb0f5730ac78c39ec8b42faec | 16,169 |
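# Illustrative usage (added sketch, not part of the original snippet; assumes the
# helper functions `area` (triangle area) and `distance` (Euclidean distance)
# referenced above are defined elsewhere in the module):
#     >>> distance_to_line((1, 2), (0, 0), (2, 0))
#     2.0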
import numpy as np
from scipy import special


def pearsonr(A, B):
"""
A broadcasting method to compute pearson r and p
-----------------------------------------------
Parameters:
A: matrix A, (i*k)
B: matrix B, (j*k)
Return:
rcorr: matrix correlation, (i*j)
pcorr: matrix correlation p, (i*j)
Example:
>>> rcorr, pcorr = pearsonr(A, B)
"""
if isinstance(A,list):
A = np.array(A)
if isinstance(B,list):
B = np.array(B)
if np.ndim(A) == 1:
A = A[None,:]
if np.ndim(B) == 1:
B = B[None,:]
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
rcorr = np.dot(A_mA, B_mB.T)/np.sqrt(np.dot(ssA[:,None], ssB[None]))
df = A.T.shape[1] - 2
r_forp = rcorr*1.0
r_forp[r_forp==1.0] = 0.0
t_squared = rcorr.T**2*(df/((1.0-rcorr.T)*(1.0+rcorr.T)))
pcorr = special.betainc(0.5*df, 0.5, df/(df+t_squared))
return rcorr, pcorr | f66ca9eb6c6367580043ab9d512400c826d30d39 | 16,170 |
import numpy as np


def inst_bench(dt, gt, bOpts, tp=None, fp=None, score=None, numInst=None):
"""
ap, rec, prec, npos, details = inst_bench(dt, gt, bOpts, tp = None, fp = None, sc = None, numInst = None)
dt - a list with a dict for each image and with following fields
        .boxInfo - info that will be used to compute the overlap with ground truths, a list
.sc - score
gt
.boxInfo - info used to compute the overlap, a list
.diff - a logical array of size nGtx1, saying if the instance is hard or not
bOpt
.minoverlap - the minimum overlap to call it a true positive
[tp], [fp], [sc], [numInst]
Optional arguments, in case the inst_bench_image is being called outside of this function
"""
details = None
if tp is None:
# We do not have the tp, fp, sc, and numInst, so compute them from the structures gt, and out
tp = []
fp = []
numInst = []
score = []
dupDet = []
instId = []
ov = []
for i in range(len(gt)):
# Sort dt by the score
sc = dt[i]["sc"]
bb = dt[i]["boxInfo"]
ind = np.argsort(sc, axis=0)
ind = ind[::-1]
            if len(ind) > 0:
                sc = np.vstack([sc[i, :] for i in ind])
                bb = np.vstack([bb[i, :] for i in ind])
            else:
                sc = np.zeros((0, 1)).astype(float)
                bb = np.zeros((0, 4)).astype(float)
dtI = dict({"boxInfo": bb, "sc": sc})
tp_i, fp_i, sc_i, numInst_i, dupDet_i, instId_i, ov_i = inst_bench_image(
dtI, gt[i], bOpts
)
tp.append(tp_i)
fp.append(fp_i)
score.append(sc_i)
numInst.append(numInst_i)
dupDet.append(dupDet_i)
instId.append(instId_i)
ov.append(ov_i)
details = {
"tp": list(tp),
"fp": list(fp),
"score": list(score),
"dupDet": list(dupDet),
"numInst": list(numInst),
"instId": list(instId),
"ov": list(ov),
}
tp = np.vstack(tp[:])
fp = np.vstack(fp[:])
sc = np.vstack(score[:])
cat_all = np.hstack((tp, fp, sc))
ind = np.argsort(cat_all[:, 2])
cat_all = cat_all[ind[::-1], :]
tp = np.cumsum(cat_all[:, 0], axis=0)
fp = np.cumsum(cat_all[:, 1], axis=0)
thresh = cat_all[:, 2]
npos = np.sum(numInst, axis=0)
# Compute precision/recall
rec = tp / npos
prec = np.divide(tp, (fp + tp))
ap = VOCap(rec, prec)
return ap, rec, prec, npos, details | 9f8e12863205c24247003a4c95cf52f99086a6a6 | 16,171 |
from typing import Tuple
from typing import List
def _tee(
cmd: str, executable: str, abort_on_error: bool
) -> Tuple[int, List[str]]:
"""
Execute command "cmd", capturing its output and removing empty lines.
:return: list of strings
"""
_LOG.debug("cmd=%s executable=%s", cmd, executable)
rc, output = hsysinte.system_to_string(cmd, abort_on_error=abort_on_error)
hdbg.dassert_isinstance(output, str)
output1 = output.split("\n")
_LOG.debug("output1= (%d)\n'%s'", len(output1), "\n".join(output1))
#
output2 = hprint.remove_empty_lines_from_string_list(output1)
_LOG.debug("output2= (%d)\n'%s'", len(output2), "\n".join(output2))
_dassert_list_of_strings(output2)
return rc, output2 | 4aafdac48b9deb4810b96f99ffc178464b79372a | 16,172 |
def normalized_str(token):
"""
Return as-is text for tokens that are proper nouns or acronyms, lemmatized
text for everything else.
Args:
token (``spacy.Token`` or ``spacy.Span``)
Returns:
str
"""
if isinstance(token, SpacyToken):
return token.text if preserve_case(token) else token.lemma_
elif isinstance(token, SpacySpan):
return ' '.join(subtok.text if preserve_case(subtok) else subtok.lemma_
for subtok in token)
else:
msg = 'Input must be a spacy Token or Span, not {}.'.format(type(token))
raise TypeError(msg) | c5e30b48716fa99bfbcf8252b3ecd018cc921cbe | 16,173 |
import tensorflow


def scatter_nd(*args, **kwargs):
""" See https://www.tensorflow.org/api_docs/python/tf/scatter_nd .
"""
return tensorflow.scatter_nd(*args, **kwargs) | 5b5d457c91df73314de6d81c105132d6b69eb1aa | 16,174 |
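# Illustrative usage (added for clarity, not part of the original snippet):
#     >>> scatter_nd(indices=[[0], [2]], updates=[5, 7], shape=[4])
#     # -> tensor with values [5, 0, 7, 0]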
from typing import Union
from typing import Tuple

import numpy as np
from sklearn.utils import check_consistent_length, check_X_y
def concatenate_sequences(X: Union[list, np.ndarray], y: Union[list, np.ndarray],
sequence_to_value: bool = False) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Concatenate multiple sequences to scikit-learn compatible numpy arrays.
    Parameters
    ----------
    X : Union[list, np.ndarray] of shape=(n_sequences, )
        All input sequences. Note that all elements in ``X``
        must have at least one equal dimension.
    y : Union[list, np.ndarray] of shape=(n_sequences, )
        All target sequences, aligned with ``X``. Note that all elements in ``y``
        must have at least one equal dimension.
sequence_to_value : bool, default=False
If true, expand each element of y to the sequence length
Returns
-------
X : np.ndarray of shape=(n_samples, n_features)
Input data where n_samples is the accumulated length of all sequences
y : np.ndarray of shape=(n_samples, n_features) or shape=(n_samples, )
Target data where n_samples is the accumulated length of all sequences
sequence_ranges : Union[None, np.ndarray] of shape=(n_sequences, 2)
Sequence border indicator matrix
"""
if isinstance(X, list):
X = np.asarray(X)
if isinstance(y, list):
y = np.asarray(y)
X = np.array(X)
y = np.array(y)
if sequence_to_value:
for k, _ in enumerate(y):
y[k] = np.repeat(y[k], X[k].shape[0])
check_consistent_length(X, y)
sequence_ranges: np.ndarray = np.ndarray([])
if X.ndim == 1:
sequence_ranges = np.zeros((X.shape[0], 2), dtype=int)
sequence_ranges[:, 1] = np.cumsum([X[k].shape[0] for k, _ in enumerate(X)])
sequence_ranges[1:, 0] = sequence_ranges[:-1, 1]
for k, _ in enumerate(X):
X[k], y[k] = check_X_y(X[k], y[k], multi_output=True)
return np.concatenate(X), np.concatenate(y), sequence_ranges | b4b2489eeb601ce5378f6cf7b2cce7daf68bdf1d | 16,175 |
def get_export_table_operator(table_name, dag=None):
"""Get templated BigQueryToCloudStorageOperator.
Args:
table_name (string): Name of the table to export.
dag (airflow.models.DAG): DAG used by context_manager. e.g. `with get_dag() as dag: get_export_table_operator(..., dag=dag)`. Defaults to None.
Returns:
airflow.contrib.operators.bigquery_operator.BigQueryOperator
"""
if dag is None:
logger.warning('No DAG context was found. The operator may not be associated to any DAG nor appeared in Web UI')
date_descriptor = '{{ ds_nodash }}'
table_name_with_date_descriptor = \
'{table_name}{date_descriptor}'.format(
table_name=table_name,
date_descriptor=date_descriptor)
return BigQueryToCloudStorageOperator(
dag=dag or models._CONTEXT_MANAGER_DAG,
task_id='{experiment_name}.{table_name}.export'
.format(
experiment_name=get_config('experiment_name'),
table_name=table_name),
source_project_dataset_table='{gcp_project_name}.{database_name}.{table_name}'
.format(
gcp_project_name=get_config('gcp_project_name'),
database_name='%s_database' % get_config('experiment_name'),
table_name=table_name_with_date_descriptor),
        # TODO: support exporting data larger than 1 GB
# https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
destination_cloud_storage_uris=[
'gs://{bucket_name}/{experiment_name}/exported_tables/'
'{table_name}/{date_descriptor}/'
'out.csv.gzip'.format(
bucket_name=get_config('bucket_name'),
experiment_name=get_config('experiment_name'),
date_descriptor=date_descriptor,
table_name=table_name)],
compression="GZIP") | b1fd75caf10bc5fefbba4b5702bf05ab1ec6be6c | 16,176 |
import logging
import subprocess as sp

# assumed module-level logger; the original snippet referenced `log` without defining it
log = logging.getLogger(__name__)


def run_command_with_code(cmd, redirect_output=True,
                          check_exit_code=True):
    """Runs a command in an out-of-process shell.

    Returns a tuple of the command's output and its exit code.
    """
if redirect_output:
stdout = sp.PIPE
else:
stdout = None
proc = sp.Popen(cmd, stdout=stdout, stderr=sp.PIPE)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
log.error('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output, proc.returncode | 45e8592def8290f45458a183bed410072cc15000 | 16,177 |
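# Illustrative usage (added for clarity, not part of the original snippet):
#     >>> output, returncode = run_command_with_code(['echo', 'hello'])
#     >>> returncode
#     0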
import logging
async def delete_project(
delete_project_request: DeleteProject, token: str = Depends(oauth2_scheme)
):
"""[API router to delete project on AWS Rekognition]
Args:
delete_project_request (DeleteProject): [AWS Rekognition create project request]
token (str, optional): [Bearer token for authentication]. Defaults to Depends(oauth2_scheme).
Raises:
HTTPException: [Unauthorized exception when invalid token is passed]
error: [Exception in underlying controller]
Returns:
[DeleteProjectResponse]: [AWS Rekognition delete project response]
"""
try:
logging.info("Calling /aws/rekog/delete_project endpoint")
logging.debug(f"Request: {delete_project_request}")
if decodeJWT(token=token):
response = ProjectController().delete_project(
request=delete_project_request
)
return DeleteProjectResponse(**response)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid access token",
headers={"WWW-Authenticate": "Bearer"},
)
except Exception as error:
logging.error(f"Error in /aws/rekog/delete_project endpoint: {error}")
raise error | 5711adf3ee9177952561d825a733ee169b6f97b0 | 16,178 |
from typing import Union
import ast
def _create_element_invocation(span_: span.Span, callee: Union[ast.NameRef,
ast.ModRef],
arg_array: ast.Expr) -> ast.Invocation:
"""Creates a function invocation on the first element of the given array.
We need to create a fake invocation to deduce the type of a function
in the case where map is called with a builtin as the map function. Normally,
map functions (including parametric ones) have their types deduced when their
ast.Function nodes are encountered (where a similar fake ast.Invocation node
is created).
Builtins don't have ast.Function nodes, so that inference can't occur, so we
  essentially perform that synthesis and deduction here.
Args:
span_: The location in the code where analysis is occurring.
callee: The function to be invoked.
arg_array: The array of arguments (at least one) to the function.
Returns:
An invocation node for the given function when called with an element in the
argument array.
"""
annotation = ast.TypeAnnotation(
span_, scanner.Token(scanner.TokenKind.KEYWORD, span_,
scanner.Keyword.U32), ())
index_number = ast.Number(
scanner.Token(scanner.TokenKind.KEYWORD, span_, '32'), annotation)
index = ast.Index(span_, arg_array, index_number)
return ast.Invocation(span_, callee, (index,)) | 0449c27fc6e7f16054bddfd99bd9e64109b9ee0e | 16,179 |
import os
import logging
def _CreateClassToFileNameDict(test_apk):
"""Creates a dict mapping classes to file names from size-info apk."""
constants.CheckOutputDirectory()
test_apk_size_info = os.path.join(constants.GetOutDirectory(), 'size-info',
os.path.basename(test_apk) + '.jar.info')
class_to_file_dict = {}
# Some tests such as webview_cts_tests use a separately downloaded apk to run
# tests. This means the apk may not have been built by the system and hence
# no size info file exists.
if not os.path.exists(test_apk_size_info):
logging.debug('Apk size file not found. %s', test_apk_size_info)
return class_to_file_dict
with open(test_apk_size_info, 'r') as f:
for line in f:
file_class, file_name = line.rstrip().split(',', 1)
# Only want files that are not prebuilt.
if file_name.startswith('../../'):
class_to_file_dict[file_class] = str(
file_name.replace('../../', '//', 1))
return class_to_file_dict | 96ba6c74217d212ee50d454225f334528919292c | 16,180 |
import time
def train_deeper_better(train_data, train_labels, test_data, test_labels, params):
"""Same as 'train_deeper', but now with tf.contrib.data.Dataset input pipeline."""
default_params = {
'regularization_coeff': 0.00001,
'keep_prob': 0.5,
'batch_size': 128,
'fc1_size': 2048,
'fc2_size': 1024,
'fc3_size': 1024,
'fc4_size': 1024,
'fc5_size': 512,
'activation': 'relu',
}
activation_funcs = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
}
def get_param(name):
if name in params:
return params[name]
logger.warning('%s not found in param, use default value %r', name, default_params[name])
return default_params[name]
regularization_coeff = get_param('regularization_coeff')
keep_prob_param = get_param('keep_prob')
batch_size = int(get_param('batch_size'))
fc1_size = int(get_param('fc1_size'))
fc2_size = int(get_param('fc2_size'))
fc3_size = int(get_param('fc3_size'))
fc4_size = int(get_param('fc4_size'))
fc5_size = int(get_param('fc5_size'))
activation_func = activation_funcs[get_param('activation')]
save_restore = False
time_limit_seconds = 3600
saver_path = join(SAVER_FOLDER, train_deeper_better.__name__)
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(52)
global_step_tensor = tf.contrib.framework.get_or_create_global_step()
epoch_tensor = tf.Variable(0, trainable=False, name='epoch')
next_epoch = tf.assign_add(epoch_tensor, 1)
# dataset definition
dataset = Dataset.from_tensor_slices({'x': train_data, 'y': train_labels})
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
sample = iterator.get_next()
x = sample['x']
y = sample['y']
# actual computation graph
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool, name='is_training')
regularizer = tf.contrib.layers.l2_regularizer(scale=regularization_coeff)
def fully_connected(x, size, name):
return dense_regularized(
x, size, is_training, keep_prob, regularizer, name, activation_func,
)
fc1 = fully_connected(x, fc1_size, 'fc1')
fc2 = fully_connected(fc1, fc2_size, 'fc2')
fc3 = fully_connected(fc2, fc3_size, 'fc3')
fc4 = fully_connected(fc3, fc4_size, 'fc4')
fc5 = fully_connected(fc4, fc5_size, 'fc5')
logits = dense(fc5, NUM_CLASSES, regularizer, 'logits')
layer_summaries(logits, 'logits_summaries')
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1)), tf.float32),
)
accuracy_percent = 100 * accuracy
tf.summary.scalar('accuracy_percent', accuracy_percent)
with tf.name_scope('loss'):
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
regularization_loss = tf.reduce_sum(regularization_losses)
cross_entropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y),
)
loss = cross_entropy_loss + regularization_loss
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)
tf.summary.scalar('loss', loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# ensures that we execute the update_ops before performing the train_op
# needed for batch normalization (apparently)
optimizer = tf.train.AdamOptimizer(learning_rate=(1e-4), epsilon=1e-3)
train_op = optimizer.minimize(loss, global_step=global_step_tensor)
all_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'train'))
batch_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'batch'))
test_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'test'))
saver = tf.train.Saver(max_to_keep=3)
test_accuracy = 0
best_accuracy = 0
with tf.Session(graph=graph) as sess:
restored = False
if save_restore:
try:
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=SAVER_FOLDER))
restored = True
except ValueError as exc:
logger.info('Could not restore previous session! %r', exc)
logger.info('Starting from scratch!')
if not restored:
tf.global_variables_initializer().run()
logger.info('Starting training...')
start_time = time.time()
def enough():
if time_limit_seconds is None:
return False
elapsed = time.time() - start_time
return elapsed > time_limit_seconds
epoch = epoch_tensor.eval()
new_epoch = True
while not enough():
logger.info('Starting new epoch #%d!', epoch)
sess.run(iterator.initializer, feed_dict={})
while not enough():
step = tf.train.global_step(sess, tf.train.get_global_step())
try:
sess.run(train_op, feed_dict={keep_prob: keep_prob_param, is_training: True})
if new_epoch:
new_epoch = False
l, reg_l, ac, summaries = sess.run(
[loss, regularization_loss, accuracy_percent, all_summaries],
feed_dict={keep_prob: keep_prob_param, is_training: False},
)
batch_writer.add_summary(summaries, global_step=step)
logger.info(
'Minibatch loss: %f, reg loss: %f, accuracy: %.2f%%',
l, reg_l, ac,
)
except tf.errors.OutOfRangeError:
logger.info('End of epoch #%d', epoch)
break
# end of epoch
previous_epoch = epoch
epoch = next_epoch.eval()
new_epoch = True
if previous_epoch % 5 == 0 and save_restore:
saver.save(sess, saver_path, global_step=previous_epoch)
def get_eval_dict(data, labels):
"""Data for evaluation."""
return {x: data, y: labels, keep_prob: 1, is_training: False}
train_l, train_ac, summaries = sess.run(
[loss, accuracy_percent, all_summaries],
feed_dict=get_eval_dict(train_data[:10000], train_labels[:10000]),
)
train_writer.add_summary(summaries, global_step=step)
test_l, test_accuracy, summaries = sess.run(
[loss, accuracy_percent, all_summaries],
feed_dict=get_eval_dict(test_data, test_labels),
)
test_writer.add_summary(summaries, global_step=step)
best_accuracy = max(best_accuracy, test_accuracy)
logger.info('Train loss: %f, train accuracy: %.2f%%', train_l, train_ac)
logger.info(
'Test loss: %f, TEST ACCURACY: %.2f%% BEST ACCURACY %.2f%% <<<<<<<',
test_l, test_accuracy, best_accuracy,
)
return best_accuracy | c2d2c56ac7dbb52d072f2397540d4d793ac0d0c4 | 16,181 |
def redirect_return():
"""Redirects back from page with url generated by url_return."""
return redirect(str(Url.get_return())) | f1ce09afef02651e0331a930e53211f9eb4f2a54 | 16,182 |
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateOperatingSystem(coresys) | daf3bd3ddca0085d6305535b27c28d70ac240dac | 16,183 |
def _weight_initializers(seed=42):
"""Function returns initilializers to be used in the model."""
kernel_initializer = tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.02, seed=seed
)
bias_initializer = tf.keras.initializers.Zeros()
return kernel_initializer, bias_initializer | 1c7652b787d4a69d3a43983c2c291c09337d06d0 | 16,184 |
import os
def inputs(eval_data, data_dir, batch_size):
"""Construct input for eye evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the eye data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
if not eval_data:
data_dir = os.path.join(data_dir, 'tr')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
data_dir = os.path.join(data_dir, 'te')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_eye(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(resized_image)
# Fix the shape of Tensor
float_image.set_shape([height, width, 3])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.1
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=False) | e5b8457bcc370df37e995db100ac5d0470df2fa8 | 16,185 |
def get_non_ready_rs_pod_names(namespace):
"""
get names of rs pods that are not ready
"""
pod_names = []
rs_pods = get_pods(namespace, selector='redis.io/role=node')
if not rs_pods:
logger.info("Namespace '%s': cannot find redis enterprise pods", namespace)
return []
for rs_pod in rs_pods:
pod_name = rs_pod['metadata']['name']
if "status" in rs_pod and "containerStatuses" in rs_pod["status"]:
for container_status_entry in rs_pod["status"]["containerStatuses"]:
container_name = container_status_entry['name']
is_ready = container_status_entry["ready"]
if container_name == RLEC_CONTAINER_NAME and not is_ready:
pod_names.append(pod_name)
return pod_names | 167922c4fa03127a3371f2c5b7516bb6462c6253 | 16,186 |
def lookup_material_probase(information_extractor, query, num):
"""Lookup material in Probase"""
material_params = {
'instance': query,
'topK': num
}
result = information_extractor.lookup_probase(material_params)
rank = information_extractor.rank_probase_result_material(result)
return rank | 9cecf99e3a9689f85788df21ef01d4e86c9a392d | 16,187 |
def get_unexpected_exit_events(op):
"""Return all unexpected exit status events."""
events = get_events(op)
if not events:
return None
return [e for e in events if is_unexpected_exit_status_event(e)] | 171158d16c34e2764bc8c91f4888863c162043c4 | 16,188 |
async def delete_user(username: str) -> GenericResponse:
"""Delete concrete user by username"""
try:
await MongoDbWrapper().remove_user(username)
except Exception as exception_message:
raise DatabaseException(error=exception_message)
return GenericResponse(detail="Deleted user") | 8b2756922ab79d058097105fa8cd000396350a3b | 16,189 |
def get_changelog():
"""download ChangeLog.txt from github, extract latest version number, return a tuple of (latest_version, contents)
"""
# url will be chosen depend on frozen state of the application
source_code_url = 'https://github.com/pyIDM/pyIDM/raw/master/ChangeLog.txt'
new_release_url = 'https://github.com/pyIDM/pyIDM/releases/download/extra/ChangeLog.txt'
url = new_release_url if config.FROZEN else source_code_url
# url = new_release_url
# get BytesIO object
log('check for PyIDM latest version ...')
buffer = download(url, verbose=False)
if buffer:
# convert to string
contents = buffer.getvalue().decode()
# extract version number from contents
latest_version = contents.splitlines()[0].replace(':', '').strip()
return latest_version, contents
else:
log("check_for_update() --> couldn't check for update, url is unreachable")
return None | 7c8df0cbc5fa85642e4e23106006445f59539a1f | 16,190 |
def get_train_tags(force=False):
""" Download (if needed) and read the training tags.
Keyword Arguments
-----------------
force : bool
If true, overwrite existing data if it already exists.
"""
download_train_tags(force=force)
return read_tags(train_tags_file_path) | 5d67422a275011a719c0121206397fb99e6e4f70 | 16,191 |
def select_own(ligands, decoys, scores):
"""Select ligand ids and decoy ids from full ranked ids."""
#scores format is full OUTDOCK line
selected = set(ligands)
selected.update(decoys)
results = []
for scoreline in scores:
#id = scoreline[extract_all.zincCol] #refer to correct column always
id = scoreline[extract_all.zincCol].split('.')[0] #refer to correct column always
# maybe in this form: zinccode.prot
#print id
if id in selected:
results.append(scoreline)
#print scoreline
return results | 444555a30571e61fad7eac36389e2dd638313744 | 16,192 |
def create_app():
    """
    Create and configure the FastAPI application object.
    :return:
    """
    app = FastAPI(
        debug=settings.DEBUG,
        title=settings.PROJECT_NAME,  # project name
        description=settings.DESCRIPTION,  # project description
        docs_url=f"{settings.API_V1}/docs",  # custom path for the Swagger docs page
        redoc_url=f"{settings.API_V1}/redocs",  # custom path for the ReDoc page
        openapi_url=f"{settings.API_V1}/openapi.json"
    )
    app.add_middleware(SessionMiddleware, secret_key="jwt")
    # Other global configuration can go here; if it grows, consider splitting it into separate modules.
    # Register Redis
    register_redis(app)
    # Register MySQL
    register_mysql(app)
    # Register MongoDB
    register_mongodb(app)
    # Register WebSocket support
    register_ws(app)
    # CORS settings
    register_cors(app)
    # Register routers
    register_router(app)
    # Register scheduled tasks
    # register_task(app)
    # Register global exception handlers
    register_exception(app)
    # Register request middleware (request interception)
    register_middleware(app)
    # if settings.DEBUG:
    #     register_static_file(app)
    return app
def cmp_text_file(text, file):
"""returns True when text and file content are identical
"""
fh = open(file)
ftext = fh.read()
fh.close()
    return ftext == text
def get_alignment_summary(seq_info):
"""
Determine the consensus sequence of an alignment, and create position matrix
Definition of consensus: most common base represented at that position.
"""
consensus_sequence = []
position_matrix = []
for position in seq_info:
#Ignore any ambiguous basecalls - accept A, T, C, G, and 'gap'
base_counts = {
'a':position['bases'].count('a')+position['bases'].count('A'),
't':position['bases'].count('t')+position['bases'].count('T'),
'c':position['bases'].count('c')+position['bases'].count('C'),
'g':position['bases'].count('g')+position['bases'].count('G'),
'-':position['bases'].count('-'),
}
#print(base_counts)
max_basecalls = [key for key, count in base_counts.items() if count == max(base_counts.values())]
if len(max_basecalls) == 1:
consensus_sequence.append(max_basecalls[0])
else:
consensus_sequence.append('n')
#Assembling position_matrix
position_matrix.append(base_counts)
return (''.join(consensus_sequence), position_matrix) | f91e4dcea2f4570a194524970fdbc95eacc455b2 | 16,195 |
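# Illustrative usage (added for clarity, not part of the original snippet):
#     >>> consensus, matrix = get_alignment_summary([{'bases': 'AAT'}, {'bases': 'CC-'}])
#     >>> consensus
#     'ac'
#     >>> matrix[0]
#     {'a': 2, 't': 1, 'c': 0, 'g': 0, '-': 0}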
def calculate_pcx_chord_emission(impact_factor, Ti, w0, mu, Lnu, Vouter, rmax=40.0, nr=101, nlambda=2000,
Lne=2.5, R_outer=35):
"""Calculates PCX emission with only the outer boundary spinning for a given impact factor
Args:
impact_factor (float): impact factor for chord
Ti (float): ion temperature in eV
w0 (float): central wavelength
mu (float): mass in amu
Lnu (float): momentum diffusion length
Vouter (float): velocity in m/s for outer boundary
rmax (float): end of the plasma
nr (int): number of radial points to integrate chord with
nlambda (int): number of wavelength points
Lne (float): density gradient scale length at rmax
R_outer (float): velocity at outer boundary
Returns:
tuple: (np.ndarray, np.ndarray) wavelength and spectrum
"""
r, theta, x = calculate_r_theta_x_from_impact_factor(impact_factor, rmax=rmax, npts=nr)
vel = pcx_velocity_profile(r, Lnu, R_outer, Vouter)
# fig, ax = plt.subplots()
# ax.plot(r, vel)
# plt.show()
vel_adjusted = vel * np.cos(theta)
# ToDo: Should really iterate over w0 to handle the He II complex
w_shifted_max = models.doppler_shift(w0, np.max(vel_adjusted))
sigma = models.doppler_broadening(w_shifted_max, mu, Ti)
wavelength = np.linspace(-1, 1, nlambda) * 10.0 * sigma + w_shifted_max
# Now to build a big spectrum matrix
w_shifts = models.doppler_shift(w0, vel_adjusted)
full_spectrum = models.gaussian(wavelength[np.newaxis, :], w_shifts[:, np.newaxis], sigma, amp=1.0, norm=False)
# fig, ax = plt.subplots()
# ax.plot(vel_adjusted, w_shifts)
# plt.show()
dens = density_profile(r, rmax, Lne)
dens = dens[:, np.newaxis]
full_spectrum *= dens ** 2
# fig, ax = plt.subplots()
# for idx, spec in enumerate(full_spectrum):
# ax.plot(wavelength, spec, 'C0')
# ax.axvline(w_shifts[idx], color='C1')
# plt.show()
# print(full_spectrum.shape)
spectrum = np.trapz(full_spectrum, x=x, axis=0)
# print(spectrum.shape)
# fig, ax = plt.subplots()
# ax.plot(wavelength, spectrum / spectrum.max(), 'C1')
# plt.show()
return wavelength, spectrum | 611d80973d3767b16fbd26c7ccebee8fc5390c95 | 16,196 |
def _get_variables(exp:Experiment, config: dict) -> dict:
"""Process the configuration's variables before rendering it"""
return {key: value.format(exp=exp) for key, value in config.get("variables", {}).items()} | 1b819c93ef079557908c216dc5c9fa75d55fe0f3 | 16,197 |
def func_calc_M(S):
"""
Use molecules structure/symbol to calculate molecular weight
Parameter:
        S : structure in a format: (atomType number) separated by '-' or blank space
              number of '-' and spaces does not matter
              precedence: '-' > blank space
Example 1:
C2H3O4N5
Example 2:
C2 - H3 - O4 - N5
Example 3:
C2 H3 O4 N5
Example 4:
C2 H3 - O4 - N5
Return:
M : molecular weight (g/mol)
"""
##Test list
##Slist = [ 123, ' ', '- - ', '---', '1,2,','1 +','4 $', #bad
# 'C3H4O5Br1Cl2', 'CHOBrCl','Br Br BrBr', #good
# 'C3 - H -2 - 2 - O', 'C3 - H2 2 - O' #bad]
log = {'nice':True, }
# define Periodic Table
PT = { 'H':1.008, 'B':10.81, 'C':12.01, 'N':14.01, 'O':16.00, 'F':19.00,
           'P':30.97, 'S':32.06, 'Cl':35.45, 'Br':79.90, 'I':126.90 }
if not isinstance(S,str):
log['nice'] = False
log['info'] = 'Error: Molecule structure has to be a string'
return log, 0.0
S = S.lower()
proS = []
# format: split by '-' then split by blank space
for t in S.split('-'): proS += t.split()
if len(proS) == 0:
log['nice'] = False
log['info'] = 'Error: empty inputs'
return log, 0.0
proSS = []
# 1D: split to [ character number character number ]
for t in proS:
if t.isdigit():
proSS.append(int(t))
elif t.isalpha():
proSS.append(t)
elif t.isalnum():
stmp = ''
for c in t:
if c.isdigit():
if stmp.isalpha():
proSS.append(stmp)
stmp = ''
else:
if stmp.isdigit():
proSS.append(int(stmp))
stmp = ''
stmp += c
if stmp.isdigit():
proSS.append(int(stmp))
else:
proSS.append(stmp)
else:
log['nice'] = False
log['info'] = 'Error: input < {:} > is not correctly defined'.format(t)
return log, 0.0
proSSS = []
# 1D: split to [ atomtype number atomtype number ]
for t in proSS:
if isinstance(t,int):
proSSS.append(t)
else:
# for character, it may have special cases like Br, Cl
while True:
if 'br' in t or 'cl' in t:
ndx = t.find('br') if 'br' in t else t.find('cl')
if ndx > 0: proSSS += [ c for c in t[:ndx] ]
proSSS.append(t[ndx:ndx+2])
if len(t) >= ndx + 2:
t = t[ndx+2:]
else:
proSSS += [ c for c in t ]
break
else:
proSSS += [ c for c in t ]
break
    # No adjacent numbers are allowed
    # However, a character without an explicit count defaults to 1
# Consider cases like:
# C 1 2 H <bad>
# C C C 3 <good>
# C 1 H 3 <good>
if not isinstance(proSSS[0],str):
log['nice'] = False
log['info'] = 'Error: the atomtype has to be in the first input along with its numbers\n' + \
' : < {:} > is not correctly defined'.format(proSSS[0])
return log, 0.0
bo = False
for t in proSSS:
if isinstance(t,int):
if bo:
log['nice'] = False
stmp = t
break
bo = True
else:
bo = False
if not log['nice']:
        log['info'] = 'Error: no adjacent number inputs are allowed\n' + \
' : < {:} > is not correctly defined'.format(stmp)
return log, 0.0
i = 0
proSSSS = []
# 2D: [ [atomtype, number], [atomtype, number], ... ]
while i < len(proSSS):
j = i + 1
if j < len(proSSS) and isinstance(proSSS[j],int):
proSSSS.append([proSSS[i],proSSS[j]])
i = j
else:
proSSSS.append([proSSS[i],1])
i += 1
# time to check for Periodic Table
M = 0.0
for t in proSSSS:
tmp = t[0].capitalize()
if tmp in PT:
M += PT[tmp] * t[1]
else:
log['nice'] = False
log['info'] = 'Error: atomtype < {:} > is not defined in Periodic Table'.format(tmp)
break
return log, M | ed8e3d5ccd5305caccfac64cb0ecb200fde650eb | 16,198 |
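# Illustrative usage (added for clarity, not part of the original snippet):
#     >>> log, M = func_calc_M('C2H6')
#     >>> log['nice'], round(M, 2)   # 2*12.01 + 6*1.008
#     (True, 30.07)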
# imports inferred from usage below; KDT is assumed to be scipy's cKDTree
import multiprocessing as MP

import numpy as NP
from scipy.spatial import cKDTree as KDT


def find_NN(ngbrof, ngbrin, distance_ULIM=NP.inf, flatten=False, parallel=False,
nproc=None):
"""
-----------------------------------------------------------------------------
Find all nearest neighbours of one set of locations in another set of
locations within a specified distance.
Inputs:
ngbrof [numpy array] Locations for nearest neighbours are to be
determined. Has dimensions MxK where M is the number of locations.
ngbrin [numpy array] Locations from which nearest neighbours are to be
chosen for the locations in ngbrof. Has dimensions NxK.
distance_ULIM
[scalar] Maximum search radius to look for neighbours.
Default=NP.inf
flatten [boolean] If set to True, flattens the output of the nearest
neighbour search algorithm to yield two separate sets of matching
indices - one for ngbrof and the other for ngbrin. Default=False
parallel [boolean] specifies if parallelization is to be invoked. False
(default) means only serial processing. Parallelization is done
over ngbrof
nproc [scalar] specifies number of independent processes to spawn.
Default=None, means automatically determines the number of
process cores in the system and use one less than that to
avoid locking the system for other processes. Applies only
if input parameter 'parallel' (see above) is set to True.
If nproc is set to a value more than the number of process
cores in the system, it will be reset to number of process
cores in the system minus one to avoid locking the system out
for other processes
Outputs:
List containing three items. The first item is a list of M lists where each
of the M inner lists corresponds to one entry in ngbrof and the elements in
the inner list contains indices to ngbrin that are the nearest neighbours of
that specific ngbrof (same as output of cKDTree.query_ball_tree()). The
second item in the output list is a numpy array of indices to ngbrof
(obtained from the first item if input keyword flatten is set to True) or
None (if input keyword flatten is set to False). The third item in the output
list is a numpy array of indices to ngbrin that is a valid neighbour of
ngbrof (obtained from the first item if input keyword flatten is set to
True) or None (if input keyword flatten is set to False).
-----------------------------------------------------------------------------
"""
try:
ngbrof, ngbrin
except NameError:
raise NameError('ngbrof and ngbrin must be specified for finding nearest neighbours.')
if (ngbrof.shape[1] != ngbrin.shape[1]):
raise ValueError('ngbrof and ngbrin must contain same number of columns')
if parallel or (nproc is not None):
if nproc is None:
nproc = max(MP.cpu_count()-1, 1)
else:
nproc = min(nproc, max(MP.cpu_count()-1, 1))
        split_ind = NP.arange(ngbrof.shape[0]//nproc, ngbrof.shape[0], ngbrof.shape[0]//nproc)
split_ngbrof_list = NP.split(ngbrof, split_ind, axis=0)
ngbrin_list = [ngbrin] * len(split_ngbrof_list)
distance_ULIM_list = [distance_ULIM] * len(split_ngbrof_list)
pool = MP.Pool(processes=nproc)
        lolol = pool.map(find_NN_arg_splitter, zip(split_ngbrof_list, ngbrin_list, distance_ULIM_list))
pool.close()
pool.join()
indNN_list = [subitem for item in lolol for subitem in item]
else:
kdtself = KDT(ngbrof)
kdtother = KDT(ngbrin)
indNN_list = kdtself.query_ball_tree(kdtother, distance_ULIM, p=2.0)
ind_ngbrof = None
ind_ngbrin = None
if flatten:
list_of_ind_tuples = [(i,ind) for i,item in enumerate(indNN_list) for ind in item]
ind_ngbrof, ind_ngbrin = zip(*list_of_ind_tuples)
return [indNN_list, NP.asarray(ind_ngbrof), NP.asarray(ind_ngbrin)] | 131d136ad92900f3ee624982f70234070d0d76a6 | 16,199 |