content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
import jax
import jax.numpy as jnp
def binary_accuracy(*, logits, labels):
"""Accuracy of binary classifier, from logits."""
p = jax.nn.sigmoid(logits)
return jnp.mean(labels == (p > 0.5))
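# Illustrative usage sketch, not part of the original snippet; assumes the jax /
# jax.numpy imports above and made-up example values.
example_logits = jnp.array([2.0, -1.0, -0.3])
example_labels = jnp.array([1, 0, 0])
print(binary_accuracy(logits=example_logits, labels=example_labels))  # 1.0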
|
f7795c8d7a945e5e5e97475888cf9e5b65aa1415
| 19,700 |
from logging import DEBUG
from pprint import pformat
from flask import Flask
def create_app(app_name=None, blueprints=None, config=None):
"""
Diffy application factory
:param config:
:param app_name:
:param blueprints:
:return:
"""
if not blueprints:
blueprints = DEFAULT_BLUEPRINTS
else:
blueprints = blueprints + DEFAULT_BLUEPRINTS
if not app_name:
app_name = __name__
app = Flask(app_name)
configure_app(app, config)
configure_blueprints(app, blueprints)
configure_extensions(app)
configure_logging(app)
if app.logger.isEnabledFor(DEBUG):
p_config = pformat(app.config)
app.logger.debug(f"Current Configuration: {p_config}")
return app
|
8bde6cbe8e01abeca10f6d1d1d2da5f20fcb6789
| 19,701 |
def extract_tag(inventory, url):
"""
extract data from sphinx inventory.
The extracted datas come from a C++ project
documented using Breathe. The structure of the inventory
is a dictionary with the following keys
- cpp:class (class names)
- cpp:function (functions or class methods)
- cpp:type (type names)
each value of this dictionary is again a dictionary with
- key : the name of the element
- value : a tuple where the third index is the url to the corresponding documentation
Parameters
----------
inventory : dict
sphinx inventory
url : url of the documentation
Returns
-------
dictionary with keys class, class_methods, func, type
but now the class methods are with their class.
"""
classes = {}
class_methods = {}
functions = {}
types = {}
get_relative_url = lambda x: x[2].replace(url, '')
for c, v in inventory.get('cpp:class', {}).items():
classes[c] = get_relative_url(v)
class_methods[c] = {}
for method, v in inventory.get('cpp:function', {}).items():
found = False
for c in class_methods.keys():
find = c + '::'
if find in method:
class_methods[c][method.replace(find, '')] = get_relative_url(v)
found = True
break
if not found:
functions[method] = get_relative_url(v)
for typename, v in inventory.get('cpp:type', {}).items():
types[typename] = get_relative_url(v)
return {'class': classes,
'class_methods': class_methods,
'func':functions,
'type': types
}
|
dcda1869fb6a44bea3b17f1d427fe279ebdc3a11
| 19,702 |
def strip_parens(s):
"""Strip parentheses around string"""
if not s:
return s
if s[0] == "(" and s[-1] == ")":
return strip_parens(s[1:-1])
else:
return s
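# Quick illustrative check, not part of the original snippet.
assert strip_parens("((hello))") == "hello"
assert strip_parens("(a) and (b)") == "a) and (b"  # only a fully wrapping pair is meaningful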
|
ee4c9ce6ee769a86a2e2e39159aa9eaa5fd422c6
| 19,703 |
import ast
def custom_eval(node, value_map=None):
"""
for safely using `eval`
"""
if isinstance(node, ast.Call):
        values = [custom_eval(v, value_map=value_map) for v in node.args]
func_name = node.func.id
if func_name in {"AVG", "IF"}:
return FUNCTIONS_MAP[func_name](*values)
elif func_name in FUNCTIONS_MAP:
return FUNCTIONS_MAP[func_name](values)
else:
raise NotImplementedError(func_name)
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.BinOp):
return OPERATORS[type(node.op)](
custom_eval(node.left, value_map=value_map),
custom_eval(node.right, value_map=value_map),
)
elif isinstance(node, ast.UnaryOp):
return OPERATORS[type(node.op)](custom_eval(node.operand, value_map=value_map))
elif isinstance(node, ast.Compare):
return OPERATORS[type(node.ops[0])](
custom_eval(node.left, value_map=value_map),
custom_eval(node.comparators[0], value_map=value_map),
)
elif isinstance(node, ast.Name):
name = node.id
if value_map is None:
raise ValueError("value_map must not be None")
        if name not in value_map:
            raise KeyError(name)
        return value_map[name]
else:
raise ArithmeticError()
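# Illustrative usage sketch, not part of the original snippet: OPERATORS and
# FUNCTIONS_MAP are module-level lookup tables that custom_eval expects; the entries
# below are assumptions for demonstration only. Relies on the ast.Num/ast.Str aliases
# still being available (deprecated since Python 3.8).
import operator
OPERATORS = {ast.Add: operator.add, ast.Sub: operator.sub,
             ast.Mult: operator.mul, ast.Div: operator.truediv,
             ast.USub: operator.neg}
FUNCTIONS_MAP = {"AVG": lambda *xs: sum(xs) / len(xs),
                 "IF": lambda cond, a, b: a if cond else b,
                 "SUM": sum}
expr = ast.parse("AVG(1, 2, 3) * x", mode="eval").body
print(custom_eval(expr, value_map={"x": 10}))  # 20.0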
|
a9ff29455ee90a83f5c54197633153d5b9d0fdbc
| 19,704 |
def validate_dict(input,validate):
"""
This function returns true or false if the dictionaries pass regexp
validation.
Validate format:
{
keyname: {
substrname: "^\w{5,10}$",
subintname: "^[0-9]+$"
}
}
Validates that keyname exists, and that it contains a substrname
that is 5-10 word characters, and that it contains subintname which
is only integers.
"""
    if not (isinstance(input, dict) and isinstance(validate, dict)):
        raise ValueError("Values to validate_dict must be dicts.")
    # Create a local copy to work our magic on.
    input = dict(input)
for key in validate.keys():
if not input.get(key):
# Key didn't exist.
return False
else:
            if not type(input[key]) == type(validate[key]) and not isinstance(input[key], str):
# The types of keys didn't match.
return False
elif type(input[key]) == dict:
if not validate_dict(input[key],validate[key]):
# The sub-validate didn't pass.
return False
else:
del input[key]
            elif isinstance(input[key], str):
if not validate_str(input[key],validate[key]):
# The sub-validate didn't pass.
return False
else:
del input[key]
            elif type(input[key]) in (int, float):
                del input[key]
else:
# I don't know how to deal with this case!
return False
if input == {}:
return True
else:
return False
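# Illustrative usage sketch, not part of the original snippet: validate_dict relies on
# a companion validate_str helper that is not shown above, so a minimal stand-in and an
# example schema (both assumptions) are used here.
import re
def validate_str(value, pattern):
    return re.match(pattern, value) is not None
example_schema = {"user": {"name": r"^\w{5,10}$", "age": r"^[0-9]+$"}}
print(validate_dict({"user": {"name": "alice77", "age": "30"}}, example_schema))  # True
print(validate_dict({"user": {"name": "al", "age": "30"}}, example_schema))       # False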
|
0a221f5586f4464f4279ab4ce3d22019e247659b
| 19,705 |
def build_windows_and_pods_from_events(backpressure_events, window_width_in_hours=1) -> (list, list):
"""
Generate barchart-friendly time windows with counts of backpressuring durations within each window.
:param backpressure_events: a list of BackpressureEvents to be broken up into time windows
:param window_width_in_hours: how wide each time window should be in hours
    :return: a tuple of (list of BackpressureWindow objects, sorted list of pod names)
"""
# The logic below is highly dependent on events being sorted by start timestamp oldest to newest.
sorted_events = backpressure_events.copy()
sorted_events.sort(key=lambda e: e.start)
interval = sorted_events[0].start.replace(minute=0, second=0, microsecond=0)
next_interval = interval + timedelta(hours=window_width_in_hours)
    all_pods = set()
windows = [BackpressureWindow(interval)]
for event in sorted_events:
all_pods.add(event.pod)
while event.start >= next_interval:
interval = next_interval
windows.append(BackpressureWindow(interval))
next_interval = next_interval + timedelta(hours=window_width_in_hours)
windows[-1].add_event(event)
all_pods_list = list(all_pods)
all_pods_list.sort()
return windows, all_pods_list
|
78adebe54d883a7251c04e250f7c14e47043d40e
| 19,706 |
import requests
import json
def package_search(api_url, org_id=None, params=None, start_index=0, rows=100, logger=None, out=None):
"""
package_search: run the package_search CKAN API query, filtering by org_id, iterating by 100, starting with 'start_index'
perform package_search by owner_org:
https://data.ioos.us/api/3/action/package_search?q=owner_org:
"""
action = "package_search"
if org_id is not None:
if params is not None:
payload = {'q': "owner_org:{id}+{params}".format(id=org_id, params="+".join(params)), 'start': start_index, 'rows': rows}
print(payload)
else:
payload = {'q': "owner_org:{id}".format(id=org_id), 'start': start_index, 'rows': rows}
print(payload)
else:
if params is not None:
payload = {'q': "{params}".format(params=" ".join(params)), 'start': start_index, 'rows': rows}
print(payload)
else:
payload = {'start': start_index, 'rows': rows}
print(payload)
url = ("/").join([api_url, "action", action])
if logger:
logger.info("Executing {action}. URL: {url}. Parameters {params}".format(action=action, url=url, params=payload))
#r = requests.get(url=url, headers = {'content-type': 'application/json'}, params=payload)
#r = requests.post(url=url, headers = {'content-type': 'application/json'}, data=json.dumps(payload))
r = requests.post(url=url, headers = {'content-type': 'application/json'}, json=payload)
print(json.dumps(payload))
print(r.text)
# either works:
#result = json.loads(r.text)
result = r.json()
# this is the full package_search result:
#if out:
# out.write(json.dumps(result, indent=4, sort_keys=True, ensure_ascii=False))
return result
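# Illustrative usage sketch, not part of the original snippet; the base URL comes from
# the docstring above and the organization id is a placeholder. Left commented out
# because it performs a live HTTP request.
# results = package_search("https://data.ioos.us/api/3", org_id="example-org-id", rows=10)
# print(results["result"]["count"])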
|
642a869931d45fe441a146cb8e931dc530170c37
| 19,707 |
from lmfit.models import VoigtModel
def voigt_fit(prefix, x, slice, c, vary):
"""
This function fits a voigt to a spectral slice. Center value can be set to constant or floated, everything else is floated.
Parameters:
prefix: prefix for lmfit to distinguish variables during multiple fits
x: x values to use in fit
slice: slice to be fit
c: center of voigt obtained from max value of the slice
vary: Boolean, determines whether c is floated default is True
Returns:
out: lmfit fit output
"""
model = VoigtModel(prefix=prefix)
pars = model.guess(slice,x=x)
pars[str(prefix)+'center'].set(c,vary=vary)
out = model.fit(slice,pars,x=x)
return out
|
034810cb6a0ac8efb311182df3d65cf0bd6002d9
| 19,708 |
from typing import List
from geopy import distance
def turn_coordinates_into_list_of_distances(list_of_coordinates: List[tuple]) -> List[float]:
"""
Function to calculate the distance between coordinates in a list. Using the
'great_circle' for measuring here, since it is much faster (but less precise
than 'geodesic').
Parameters
----------
list_of_coordinates : List[tuple]
A list containing tuples with coordinates
Returns
-------
list_of_distances : List[float]
A list containing the distance in kilometers between two coordinates.
Subsequent values are added up, thus the values are increasing.
"""
list_of_distances = []
previous_coordinates = None
for coordinates in list_of_coordinates:
if not previous_coordinates:
list_of_distances.append(0.)
else:
dist = distance.great_circle([previous_coordinates[1], previous_coordinates[0]], [coordinates[1], coordinates[0]])
list_of_distances.append(round(list_of_distances[-1] + dist.km, 4))
previous_coordinates = coordinates
return list_of_distances
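# Illustrative usage sketch, not part of the original snippet; the coordinates are
# (longitude, latitude) tuples, matching the swapped indexing used above.
example_route = [(13.4050, 52.5200), (11.5820, 48.1351)]  # Berlin -> Munich
print(turn_coordinates_into_list_of_distances(example_route))  # [0.0, ~504] km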
|
5fdc0198b533604ec3d935224c7b2b634670083e
| 19,709 |
import json
def getPileupDatasetSizes(datasets, phedexUrl):
"""
Given a list of datasets, find all their blocks with replicas
available, i.e., blocks that have valid files to be processed,
and calculate the total dataset size
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:return: a dictionary of datasets and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
sizeByDset = {}
if not datasets:
return sizeByDset
urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if row['data'] is None:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
sizeByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
sizeByDset.setdefault(dataset, 0) # flat dict in the format of blockName: blockSize
try:
for item in rows['phedex']['block']:
sizeByDset[dataset] += item['bytes']
except Exception as exc:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
sizeByDset[dataset] = None
return sizeByDset
|
48d77aa47998204ff99df188ef830cae647ac9b9
| 19,710 |
def convertpo(inputpofile, outputpotfile, template, reverse=False):
"""reads in inputpofile, removes the header, writes to outputpotfile."""
inputpo = po.pofile(inputpofile)
templatepo = po.pofile(template)
if reverse:
swapdir(inputpo)
templatepo.makeindex()
header = inputpo.header()
if header:
inputpo.units = inputpo.units[1:]
    for unit in inputpo.units:
for location in unit.getlocations():
templateunit = templatepo.locationindex.get(location, None)
if templateunit and templateunit.source == unit.source:
break
else:
templateunit = templatepo.findunit(unit.source)
unit.othercomments = []
if unit.target and not unit.isfuzzy():
unit.source = unit.target
elif not reverse:
if inputpo.filename:
unit.addnote("No translation found in %s" % inputpo.filename, origin="programmer")
else:
unit.addnote("No translation found in the supplied source language", origin="programmer")
unit.target = ""
unit.markfuzzy(False)
if templateunit:
unit.addnote(templateunit.getnotes(origin="translator"))
unit.markfuzzy(templateunit.isfuzzy())
unit.target = templateunit.target
    # Drop obsolete units after the loop to avoid mutating the list while iterating over it.
    inputpo.units = [u for u in inputpo.units if not u.isobsolete()]
    outputpotfile.write(str(inputpo))
return 1
|
6954354db5ca9c660e326eeae23906853743eb57
| 19,711 |
import numpy as np
from numpy import deg2rad, cos, sin, tan
def do_fk5(l, b, jde):
    """Apply the FK5 correction to ecliptic coordinates (dynamical equinox to FK5 frame).
Parameters
----------
l : float
longitude
b : float
latitude
jde : float
Julian Day of the ephemeris
Returns
-------
tuple
tuple(l,b)
"""
T = (jde - JD_J2000) / CENTURY
lda = l - deg2rad(1.397)*T - deg2rad(0.00031)*T*T
delta_lon = -deg2rad(0.09033/3600) + deg2rad(0.03916/3600)*(cos(lda)+sin(lda))*tan(b)
delta_lat = deg2rad(0.03916/3600)*(np.cos(lda)- np.sin(lda))
l += delta_lon
b += delta_lat
return l,b
|
2ccc96aab8ddfcbe93d7534a01b0f262c0330053
| 19,712 |
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.8 ** (epoch // 1))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
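# Illustrative usage sketch, not part of the original snippet; assumes PyTorch and a
# module-level `args` namespace with an `lr` attribute, which the function expects.
import argparse
import torch
args = argparse.Namespace(lr=0.1)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
for epoch in range(3):
    print(adjust_learning_rate(optimizer, epoch))  # 0.1, 0.08, 0.064 (approximately)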
|
ce4b5a3aa70ab07791af3bd41e14758e568fb1cc
| 19,713 |
import yaml
def get_defaults(module, *args):
"""
Find an internal defaults data file, load it using YAML, and return the resulting
dictionary.
Takes the dot-separated module path (e.g. "abscal.wfc3.reduce_grism_extract"), splits
off the last item (e.g. ["abscal.wfc3", "reduce_grism_extract"]), adds ".yaml" to the
end of the second item (e.g. ["abscal.wfc3", "reduce_grism_extract.yaml"]), adds
".defaults" to the first item
(e.g. ["abscal.wfc3.defaults", "reduce_grism_extract.yaml"]), and feeds the result
into :code:`get_data_file()`. Then loads the resulting file as a dictionary, and
builds a new dictionary consisting of:
- All key/value pairs in the "all" dictionary
- All key/value pairs in any dictionary matching any of the keyword arguments
- The above two items from any dictionary matching any of the keyword arguments,
extending recursively into the depths of the dictionary.
The result will be a flat (i.e. single-level) dictionary.
Parameters
----------
module : str
The module to search in, using standard dot separators (e.g. abscal.wfc3)
args : list
A list of specific keyword arguments, provided to ensure the inclusion of
specific sub-values or sub-dictionaries.
Returns
-------
defaults : dict
Dictionary of default parameters.
"""
items = module.split(".")
module = ".".join(items[:-1])
file_name = items[-1]+".yaml"
defaults_file = get_data_file(module, file_name, defaults=True)
with open(defaults_file, "r") as inf:
defaults_dict = yaml.safe_load(inf)
defaults = _extract_dict(defaults_dict, {}, args)
return defaults
|
a92e37f75c4f967c2b23391a817fb14118b89a8f
| 19,714 |
import shlex
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if _tryorder is None:
with _lock:
if _tryorder is None:
register_standard_browsers()
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
|
12c2ca5fdd93964527330a0694d69a8d4e84ee12
| 19,715 |
import binascii
def _bin_to_long(x):
"""
Convert a binary string into a long integer
This is a clever optimization for fast xor vector math
"""
return int(binascii.hexlify(x), 16)
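# Quick illustrative check, not part of the original snippet.
print(_bin_to_long(b"\x00\xff"))  # 255
print(_bin_to_long(b"\x01\x00"))  # 256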
|
54b50ffea715bf127eabd7e82aada36e4717c288
| 19,716 |
def update_book(username, book_id, data):
"""Update book data"""
cursor, conn = db_sql.connect('books.db')
keys = list(data.keys())
sql = ("UPDATE " + username + " SET " + " = ?, ".join(keys) +
" = ? WHERE _id = ?")
temp_list = []
for key in keys:
temp_list.append(data[key])
temp_list.append(book_id)
cursor.execute(sql, tuple(temp_list))
conn.commit()
conn.close()
return cursor.lastrowid
|
2dcc2970cec8f53c90c72f341092f7cb7c7d6232
| 19,717 |
import numpy as np
def score_retrievals(label, retrievals):
"""
Evaluating the current retrieval experiment
Args:
-----
label: string
label corresponding to the query
    retrievals: list
        list of strings containing the ranked labels corresponding to the retrievals
"""
# retrievals = retrievals[1:] # we do not account rank-0 since it's self-retrieval
relevant_mask = np.array([1 if r==label else 0 for r in retrievals])
num_relevant_retrievals = np.sum(relevant_mask)
if(num_relevant_retrievals == 0):
print(label)
metrics = {
"label": label,
"p@1": -1,
"p@5": -1,
"p@10": -1,
"p@50": -1,
"p@rel": -1,
"mAP": -1,
"r@1": -1,
"r@5": -1,
"r@10": -1,
"r@50": -1,
"r@rel": -1,
"mAR": -1
}
return metrics
# computing precision based metrics
precision_at_rank = np.cumsum(relevant_mask) / np.arange(1, len(relevant_mask) + 1)
precision_at_1 = precision_at_rank[0]
precision_at_5 = precision_at_rank[4]
precision_at_10 = precision_at_rank[9]
precision_at_50 = precision_at_rank[49]
precision_at_rel = precision_at_rank[num_relevant_retrievals - 1]
average_precision = np.sum(precision_at_rank * relevant_mask) / num_relevant_retrievals
# computing recall based metrics
recall_at_rank = np.cumsum(relevant_mask) / num_relevant_retrievals
recall_at_1 = recall_at_rank[0]
recall_at_5 = recall_at_rank[4]
recall_at_10 = recall_at_rank[9]
recall_at_50 = recall_at_rank[49]
recall_at_rel = recall_at_rank[num_relevant_retrievals - 1]
average_recall = np.sum(recall_at_rank * relevant_mask) / num_relevant_retrievals
metrics = {
"label": label,
"p@1": precision_at_1,
"p@5": precision_at_5,
"p@10": precision_at_10,
"p@10": precision_at_50,
"p@rel": precision_at_rel,
"mAP": average_precision,
"r@1": recall_at_1,
"r@5": recall_at_5,
"r@10": recall_at_10,
"r@10": recall_at_50,
"r@rel": recall_at_rel,
"mAR": average_recall
}
return metrics
|
c9a3a24c2c6e5a2986387db88710da43984bd862
| 19,718 |
def default_add_one_res_2_all_res(one_res: list, all_res: list) -> list:
"""
    Default function 1: add the contents of one_res into all_res.
:param one_res:
:param all_res:
:return:
"""
for i in one_res:
for j in i:
all_res.append(j)
return all_res
|
9c2e83ffaa7c67759f8b3d7cf30354d7cf7ca030
| 19,719 |
from typing import Iterable
import re
def search_gene(search_string: str, **kwargs) -> Iterable[Gene]:
""" Symbols have been separated into search_gene_symbol - this returns Gene objects """
CONSORTIUM_REGEX = {
r"(ENSG\d+)": AnnotationConsortium.ENSEMBL,
r"Gene:(\d+)": AnnotationConsortium.REFSEQ,
r"GeneID:(\d+)": AnnotationConsortium.REFSEQ,
r"Gene ID:(\d+)": AnnotationConsortium.REFSEQ,
}
for c_regex, annotation_consortium in CONSORTIUM_REGEX.items():
if m := re.match(c_regex, search_string, re.IGNORECASE):
gene_id = m.group(1)
return Gene.objects.filter(identifier=gene_id, annotation_consortium=annotation_consortium)
return []
|
768c6ac712b6660b78b2bead3be6f0000541696f
| 19,720 |
import os
import vdf
def get_steam_libraries():
"""Returns list of found Steam library folders."""
found_libraries = []
if os.path.isdir(STEAM_INSTALL_DIR + '/steamapps/common'):
found_libraries.append(STEAM_INSTALL_DIR)
libraries_config = {}
if LIBRARY_FOLDERS_FILE:
libraries_config = vdf.load(open(LIBRARY_FOLDERS_FILE))
if libraries_config:
keyword = ''
if 'libraryfolders' in libraries_config:
keyword = 'libraryfolders'
elif 'LibraryFolders' in libraries_config:
keyword = 'LibraryFolders'
for library in libraries_config[keyword].values():
library_path = ''
if 'path' in library:
library_path = library['path']
elif isinstance(library, str):
library_path = library
if library_path and library_path not in found_libraries and os.path.isdir(library_path + '/steamapps/common'):
found_libraries.append(library_path)
return found_libraries
|
2a2e0460929dd0b8801a5caa2ebde055f14b2317
| 19,721 |
import logging
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy.ndimage import gaussian_filter
from skimage.transform import pyramid_gaussian
from skimage.exposure import rescale_intensity
from skimage.filters import threshold_local
from skimage.morphology import binary_opening, binary_erosion, disk, remove_small_holes
from skimage.measure import label, regionprops
from skimage.color import label2rgb
def detect_wings_simple(img, pixel_size=1,
ds=2, layers=2, thresh_window=1.8e3,
minarea=0.5e6, maxarea=2e6, minsolidity=.6,
minaspect=.3, plot=False, threshold_fun=None):
"""
simple wing detection via adaptive thresholding and some filtering by shape
default area 0.5-2 mm^2
Parameters
----------
img: np-array (2-dim)
the input image
pixel_size: scalar
pixel size in input image
ds: scalar
downsampling factor at each layer
layers: scalar
how may downsampling layers to calculate
thresh_window: integer
window for adaptive threshold, in original image pixels
minarea: scalar
minimum size of objects to detect, in units^2
maxarea: scalar
maximum size of objects to detect, in units^2
minsolidity: scalar
minimal solidity of detected objects \in (0,1)
minaspect: scalar
minimal inverse aspect ratio of detected objects \in (0,1)
plot: boolean
whether to plot detections or not
threshold_fun: function pointer, optional
thresholding function to use in windows
Returns
-------
bboxes: list of 4-tuples
bounding boxes (in original image pixel units)
"""
# scale min and max area to be in pixels^2
minarea = minarea / pixel_size**2 / ds**(layers*2)
maxarea = maxarea / pixel_size**2 / ds**(layers*2)
# scale thresh window size, make sure it is odd
thresh_window = int(thresh_window / pixel_size / ds**layers)
thresh_window += 0 if thresh_window%2 == 1 else 1
logger = logging.getLogger(__name__)
# some debug output:
logger.info('wing detection started')
logger.debug('input shape: {}'.format(img.shape))
logger.debug('ds: {}, layer:{}'.format(ds, layers))
logger.debug('minarea: {}, maxarea:{}'.format(minarea, maxarea))
logger.debug('threshold window: {}'.format(thresh_window))
# downsample
pyr = [p for p in pyramid_gaussian(img, max_layer= layers, downscale = ds)]
img_ds = pyr[layers]
logger.debug('img size after ds: {}'.format(img_ds.shape))
# rescale to (0-1)
img_ds = img_ds.astype(float)
img_ds = rescale_intensity(img_ds, out_range=(0.0, 1.0))
# smooth
img_ds = gaussian_filter(img_ds, 2.0)
# adaptive threshold
if threshold_fun is None:
thrd = img_ds > threshold_local(img_ds, thresh_window)
else:
thrd = img_ds > threshold_local(img_ds, thresh_window, method='generic', param=threshold_fun)
# clean a bit
thrd = np.bitwise_not(thrd)
thrd = binary_opening(thrd, selem=disk(4))
labelled = label(thrd)
# filter objs
ls = [r.label for r in regionprops(labelled) if r.area>minarea and
r.area<maxarea and r.solidity>minsolidity and aspect(r.bbox) > minaspect]
# filtered binary
res = np.zeros(thrd.shape)
l = label(thrd)
for li in ls:
res += (l == li)
# more cleaning, plus some erosion to separate touching wings
    r2 = remove_small_holes(res.astype(bool), 25000)
r2 = binary_erosion(r2, selem=disk(3))
# show detections
if plot:
image_label_overlay = label2rgb(label(r2), image=img_ds)
plt.imshow(image_label_overlay)
ax = plt.gca()
# get bboxes
bboxes = []
for r in regionprops(label(r2)):
# TODO: is this really necessary?
if r.area < (minarea * .8 ):
continue
bbox_scaled = np.array(r.bbox) * (ds**layers)
logger.debug('bbox: {}, upsampled: {}'.format(r.bbox, bbox_scaled))
bboxes.append(bbox_scaled)
if plot:
minr, minc, maxr, maxc = r.bbox
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
logger.info('found {} object(s)'.format(len(bboxes)) )
return bboxes
|
84c625ce35bb920d893f766c900d3172f658f905
| 19,722 |
def check_logged(request):
"""Check if user is logged and have the permission."""
permission = request.GET.get('permission', '')
if permission:
has_perm = request.user.has_perm(permission)
if not has_perm:
msg = (
"User does not have permission to exectute this action:\n"
"expected permission: {permission}").format(
permission=permission)
raise exceptions.PumpWoodUnauthorized(
message=msg, payload={
"permission": permission})
return Response(True)
|
2cf03f7336b7c8814fd380aae5209b0b8fe6dca9
| 19,723 |
def _deprecated_configs(agentConfig):
""" Warn about deprecated configs
"""
deprecated_checks = {}
deprecated_configs_enabled = [v for k, v in OLD_STYLE_PARAMETERS if len([l for l in agentConfig if l.startswith(k)]) > 0]
for deprecated_config in deprecated_configs_enabled:
msg = "Configuring %s in datadog.conf is not supported anymore. Please use conf.d" % deprecated_config
deprecated_checks[deprecated_config] = {'error': msg, 'traceback': None}
log.error(msg)
return deprecated_checks
|
e47a47a1a7dd40d04a21927730479500f934a1d1
| 19,724 |
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1, stack_depth=2):
"""
Instruments the given method on the given object to verify the number of calls to the method is
less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
"""
return check_sum_of_calls(
object_with_method,
[method_name],
maximum_calls,
minimum_calls,
stack_depth=stack_depth + 1
)
|
64bdc512753b159128e34aa7c95d60a741745fce
| 19,725 |
from urllib.parse import urlparse
def strict_transport_security(reqs: dict, expectation='hsts-implemented-max-age-at-least-six-months') -> dict:
"""
:param reqs: dictionary containing all the request and response objects
:param expectation: test expectation
hsts-implemented-max-age-at-least-six-months: HSTS implemented with a max age of at least six months (15768000)
hsts-implemented-max-age-less-than-six-months: HSTS implemented with a max age of less than six months
hsts-not-implemented-no-https: HSTS can't be implemented on http only sites
hsts-not-implemented: HSTS not implemented
hsts-header-invalid: HSTS header isn't parsable
hsts-invalid-cert: Invalid certificate chain
:return: dictionary with:
data: the raw HSTS header
expectation: test expectation
includesubdomains: whether the includeSubDomains directive is set
pass: whether the site's configuration met its expectation
preload: whether the preload flag is set
result: short string describing the result of the test
"""
SIX_MONTHS = 15552000 # 15768000 is six months, but a lot of sites use 15552000, so a white lie is in order
output = {
'data': None,
'expectation': expectation,
'includeSubDomains': False,
'max-age': None,
'pass': False,
'preload': False,
'preloaded': False,
'result': 'hsts-not-implemented',
}
response = reqs['responses']['https']
# If there's no HTTPS, we can't have HSTS
if response is None:
output['result'] = 'hsts-not-implemented-no-https'
# Also need a valid certificate chain for HSTS
elif not response.verified:
output['result'] = 'hsts-invalid-cert'
elif 'Strict-Transport-Security' in response.headers:
output['data'] = response.headers['Strict-Transport-Security'][0:1024] # code against malicious headers
try:
sts = [i.lower().strip() for i in output['data'].split(';')]
# Throw an error if the header is set twice
if ',' in output['data']:
raise ValueError
for parameter in sts:
if parameter.startswith('max-age='):
output['max-age'] = int(parameter[8:128]) # defense
elif parameter == 'includesubdomains':
output['includeSubDomains'] = True
elif parameter == 'preload':
output['preload'] = True
if output['max-age']:
if output['max-age'] < SIX_MONTHS: # must be at least six months
output['result'] = 'hsts-implemented-max-age-less-than-six-months'
else:
output['result'] = 'hsts-implemented-max-age-at-least-six-months'
else:
raise ValueError
except:
output['result'] = 'hsts-header-invalid'
# If they're in the preloaded list, this overrides most anything else
# TODO: Check to see if all redirect domains are preloaded
# TODO: Check every redirect along the way for HSTS
if response is not None:
preloaded = is_hsts_preloaded(urlparse(response.url).netloc)
if preloaded:
output['result'] = 'hsts-preloaded'
output['includeSubDomains'] = preloaded['includeSubDomains']
output['preloaded'] = True
# Check to see if the test passed or failed
if output['result'] in ('hsts-implemented-max-age-at-least-six-months',
'hsts-preloaded',
expectation):
output['pass'] = True
return output
|
55c588cf1c7e214862aae4ace5c7c8a5feb9dabc
| 19,726 |
def _get_span(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the span for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info, if None, uses the entire span of the result
resultidx: index of the result in success
matchidx: if there is more than one match info with that name, which one to return, if no name, ignored
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the span or None if no Span exists
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
if name:
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
ret = matches[matchidx].get("span")
else:
ret = res.span
if ret is None:
if silent_fail:
return None
else:
raise Exception("No span found")
return ret
|
1fc6208f1aa7289a53e4e64c041abb71498a2eeb
| 19,727 |
import random
import numpy as np
def gen_k_arr(K, n):
"""
Arguments:
K {int} -- [apa numbers]
n {int} -- [trial numbers]
"""
def random_sel(K, trial=200):
count_index = 0
pool = np.arange(K)
last = None
while count_index < trial:
count_index += 1
random.shuffle(pool)
if pool[0] == last:
swap_with = random.randrange(1, len(pool))
pool[0], pool[swap_with] = pool[swap_with], pool[0]
for item in pool:
yield item
last = pool[-1]
if K <= 1:
return np.repeat(K - 1, n)
else:
k_lst = list(random_sel(K, trial=n))
return np.array(k_lst)
|
c66084faa8903455835973226ea6ca570239a1ec
| 19,728 |
def tau_data(spc_dct_i,
spc_mod_dct_i,
run_prefix, save_prefix, saddle=False):
""" Read the filesystem to get information for TAU
"""
# Set up all the filesystem objects using models and levels
pf_filesystems = filesys.models.pf_filesys(
spc_dct_i, spc_mod_dct_i, run_prefix, save_prefix, saddle)
[harm_cnf_fs, _,
harm_min_locs, harm_save, _] = pf_filesystems['harm']
# [tors_cnf_fs, _, tors_min_locs, _, _] = pf_filesystems['tors']
# Get the conformer filesys for the reference geom and energy
if harm_min_locs:
geom = harm_cnf_fs[-1].file.geometry.read(harm_min_locs)
min_ene = harm_cnf_fs[-1].file.energy.read(harm_min_locs)
# Set the filesystem
tau_save_fs = autofile.fs.tau(harm_save)
# Get the rotor info
rotors = tors.build_rotors(spc_dct_i, pf_filesystems, spc_mod_dct_i)
run_path = filesys.models.make_run_path(pf_filesystems, 'tors')
tors_strs = tors.make_hr_strings(
rotors, run_path, spc_mod_dct_i)
[_, hr_str, flux_str, prot_str, _] = tors_strs
# Use model to determine whether to read grads and hessians
vib_model = spc_mod_dct_i['vib']['mod']
freqs = ()
_, _, proj_zpve, harm_zpve = vib.tors_projected_freqs_zpe(
pf_filesystems, hr_str, prot_str, run_prefix, zrxn=None)
zpe_chnlvl = proj_zpve * phycon.EH2KCAL
# Set reference energy to harmonic zpve
db_style = 'directory'
reference_energy = harm_zpve * phycon.EH2KCAL
if vib_model == 'tau':
if db_style == 'directory':
tau_locs = [locs for locs in tau_save_fs[-1].existing()
if tau_save_fs[-1].file.hessian.exists(locs)]
elif db_style == 'jsondb':
tau_locs = [locs for locs in tau_save_fs[-1].json_existing()
if tau_save_fs[-1].json.hessian.exists(locs)]
else:
if db_style == 'directory':
tau_locs = tau_save_fs[-1].existing()
elif db_style == 'jsondb':
tau_locs = tau_save_fs[-1].json_existing()
# Read the geom, ene, grad, and hessian for each sample
samp_geoms, samp_enes, samp_grads, samp_hessians = [], [], [], []
for locs in tau_locs:
# ioprinter.info_message('Reading tau info at path {}'.format(
# tau_save_fs[-1].path(locs)))
if db_style == 'directory':
geo = tau_save_fs[-1].file.geometry.read(locs)
elif db_style == 'jsondb':
geo = tau_save_fs[-1].json.geometry.read(locs)
geo_str = autofile.data_types.swrite.geometry(geo)
samp_geoms.append(geo_str)
if db_style == 'directory':
tau_ene = tau_save_fs[-1].file.energy.read(locs)
elif db_style == 'jsondb':
tau_ene = tau_save_fs[-1].json.energy.read(locs)
rel_ene = (tau_ene - min_ene) * phycon.EH2KCAL
ene_str = autofile.data_types.swrite.energy(rel_ene)
samp_enes.append(ene_str)
if vib_model == 'tau':
if db_style == 'directory':
grad = tau_save_fs[-1].file.gradient.read(locs)
elif db_style == 'jsondb':
grad = tau_save_fs[-1].json.gradient.read(locs)
grad_str = autofile.data_types.swrite.gradient(grad)
samp_grads.append(grad_str)
if db_style == 'directory':
hess = tau_save_fs[-1].file.hessian.read(locs)
elif db_style == 'jsondb':
hess = tau_save_fs[-1].json.hessian.read(locs)
hess_str = autofile.data_types.swrite.hessian(hess)
samp_hessians.append(hess_str)
# Read a geometry, grad, and hessian for a reference geom if needed
ref_geom, ref_grad, ref_hessian = [], [], []
if vib_model != 'tau':
# Get harmonic filesystem information
[harm_save_fs, _, harm_min_locs, _, _] = pf_filesystems['harm']
# Read the geometr, gradient, and Hessian
geo = harm_save_fs[-1].file.geometry.read(harm_min_locs)
geo_str = autofile.data_types.swrite.geometry(geo)
ref_geom.append(geo_str)
grad = harm_save_fs[-1].file.gradient.read(harm_min_locs)
grad_str = autofile.data_types.swrite.gradient(grad)
ref_grad.append(grad_str)
hess = harm_save_fs[-1].file.hessian.read(harm_min_locs)
hess_str = autofile.data_types.swrite.hessian(hess)
ref_hessian.append(hess_str)
# Obtain symmetry factor
ioprinter.info_message('Determining the symmetry factor...', newline=1)
sym_factor = symm.symmetry_factor(
pf_filesystems, spc_mod_dct_i, spc_dct_i, rotors,
)
# Create info dictionary
keys = ['geom', 'sym_factor', 'elec_levels', 'freqs', 'flux_mode_str',
'samp_geoms', 'samp_enes', 'samp_grads', 'samp_hessians',
'ref_geom', 'ref_grad', 'ref_hessian',
'zpe_chnlvl', 'reference_energy']
vals = [geom, sym_factor, spc_dct_i['elec_levels'], freqs, flux_str,
samp_geoms, samp_enes, samp_grads, samp_hessians,
ref_geom, ref_grad, ref_hessian,
zpe_chnlvl, reference_energy]
inf_dct = dict(zip(keys, vals))
return inf_dct
|
d27742140929d79dd4bb7a36094b5e5caf7173e2
| 19,729 |
def get_atten(log, atten_obj):
"""Get attenuator current attenuation value.
Args:
log: log object.
atten_obj: attenuator object.
Returns:
Current attenuation value.
"""
return atten_obj.get_atten()
|
22d69d326846105491b1fa90f319eb9e0da69a20
| 19,730 |
def lfs_hsm_remove(log, fpath, host=None):
"""
HSM remove
"""
command = ("lfs hsm_remove %s" % (fpath))
extra_string = ""
if host is None:
retval = utils.run(command)
else:
retval = host.sh_run(log, command)
extra_string = ("on host [%s]" % host.sh_hostname)
if retval.cr_exit_status != 0:
log.cl_error("failed to run command [%s]%s, "
"ret = [%d], stdout = [%s], stderr = [%s]",
command, extra_string,
retval.cr_exit_status, retval.cr_stdout,
retval.cr_stderr)
return -1
return 0
|
ddca4a626786dfecfef231737761924527d136d5
| 19,731 |
import numpy as np
def area_under_curve_score(table, scoring_function):
"""Takes a run and produces the total area under the curve until the end of the run.
mean_area_under_curve_score is probably more informative."""
assert_run(table)
scores = get_scores(table,scoring_function)
return np.trapz(scores)
|
f84fd0a2adede09c17aa6254906bee36a2738983
| 19,732 |
def read_key_value(file):
    """Supports comments; supports Chinese text."""
    return_dict = {}
    lines = readlines(file)
    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, value = line.split(':', 1)
        return_dict[key.strip()] = value.strip()
    return return_dict
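# Illustrative sketch, not part of the original snippet: `readlines` above is an
# external project helper; the stand-in below is an assumption for demonstration.
def readlines(path):
    with open(path, encoding='utf-8') as f:
        return f.readlines()
# For a file containing the lines "# comment", "host: localhost" and "端口: 5432",
# read_key_value(path) returns {'host': 'localhost', '端口': '5432'}.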
|
9fdac43783c066872a05cbd59488add7a2dc54c0
| 19,733 |
import numpy as np
def binarize_image(image):
"""Binarize image pixel values to 0 and 255."""
unique_values = np.unique(image)
if len(unique_values) == 2:
if (unique_values == np.array([0., 255.])).all():
return image
mean = image.mean()
image[image > mean] = 255
image[image <= mean] = 0
return image
|
6e4a621b0a2ff06d6a6bf5c0eb45f1028e6d526f
| 19,734 |
from typing import Type
def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]:
"""A reference to the :class: `LineMatcher`.
This is instantiable with a list of lines (without their trailing newlines).
This is useful for testing large texts, such as the output of commands.
"""
return LineMatcher
|
86c05df8f099ba66e62ae0bb071b2999cbb4f082
| 19,735 |
def Delay(opts, args):
"""Sleeps for a while
@param opts: the command line options selected by the user
@type args: list
  @param args: should contain only one element, the duration of
    the sleep
@rtype: int
@return: the desired exit code
"""
delay = float(args[0])
op = opcodes.OpTestDelay(duration=delay,
on_master=opts.on_master,
on_nodes=opts.on_nodes,
repeat=opts.repeat,
interruptible=opts.interruptible,
no_locks=opts.no_locks)
SubmitOrSend(op, opts)
return 0
|
c9ecd6cb3dbdcd5ae48c527f2f6769789b05664d
| 19,736 |
def logout():
""" Simply loading the logout page while logged in will log the user out """
logout_user()
return render_template(f"{app_name}/logout.html")
|
33191c6870a0aac8fcdebb0349b93196e2ed0ba8
| 19,737 |
from typing import Dict
from typing import Any
def identify_larger_definition(
one: ObjectDefinition,
two: ObjectDefinition
) -> Dict[str, Any]:
"""Return the larger (in dimensions) of the two given definitions."""
if not one:
return two
if not two:
return one
# TODO Handle if one has a larger X but other has a larger Z
return one if (
one.dimensions.x > two.dimensions.x or
one.dimensions.z > two.dimensions.z
) else two
|
0ba3815ced847de278b503f10a55a718ee00cd81
| 19,738 |
def duration_to_timedelta(obj):
"""Converts duration to timedelta
>>> duration_to_timedelta("10m")
    datetime.timedelta(0, 600)
"""
matches = DURATION_PATTERN.search(obj)
matches = matches.groupdict(default="0")
matches = {k: int(v) for k, v in matches.items()}
return timedelta(**matches)
|
fcfa67e6667b232a6647cb71fff543a45a6d3475
| 19,739 |
async def create_mock_hlk_sw16_connection(fail):
"""Create a mock HLK-SW16 client."""
client = MockSW16Client(fail)
await client.setup()
return client
|
14589398e268a76637994f2883f2cd824a14a81b
| 19,740 |
import numpy as np
def inv_dist_weight(distances, b):
"""Inverse distance weight
Parameters
----------
distances : numpy.array of floats
Distances to point of interest
b : float
        The parameter of the inverse distance weight. The higher it is, the
        greater the influence of nearby stations.
Returns
-------
lambdas : numpy.array of floats
The lambda parameters of the stations
"""
lambdas = 1/distances**b / np.sum(1/distances**b)
return lambdas
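# Illustrative usage sketch, not part of the original snippet; the distances are made up.
example_distances = np.array([1.0, 2.0, 4.0])
print(inv_dist_weight(example_distances, b=2))  # [0.7619... 0.1905... 0.0476...]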
|
c7e857bba312277b193ce5eda7467b8b0bf8bd75
| 19,741 |
import pytz
import pandas as pd
from collections import defaultdict
def load_inferred_fishing(table, id_list, project_id, threshold=True):
"""Load inferred data and generate comparison data
"""
query_template = """
SELECT vessel_id, start_time, end_time, nnet_score FROM
TABLE_DATE_RANGE([{table}],
TIMESTAMP('{year}-01-01'), TIMESTAMP('{year}-12-31'))
WHERE vessel_id in ({ids})
"""
ids = ','.join('"{}"'.format(x) for x in id_list)
ranges = defaultdict(list)
for year in range(2012, 2018):
query = query_template.format(table=table, year=year, ids=ids)
print(query)
for x in pd.read_gbq(query, project_id=project_id).itertuples():
score = x.nnet_score
if threshold:
score = score > 0.5
start = x.start_time.replace(tzinfo=pytz.utc)
end = x.end_time.replace(tzinfo=pytz.utc)
ranges[x.vessel_id].append(FishingRange(score, start, end))
print([(key, len(val)) for (key, val) in ranges.items()])
return ranges
|
fba7e007b38d141e91c0608cbd609a2d3b474b4b
| 19,742 |
from typing import Any
def is_optional(value: Any) -> CheckerReturn:
"""
It is a rather special validator because it never returns False and emits an exception
signal when the value is correct instead of returning True.
Its user should catch the signal to short-circuit the validation chain.
"""
if value is None:
raise exceptions.ValueNotRequired()
return True
|
25e45617ca5584dc2470d9e76ef884596c465917
| 19,743 |
import numpy as np
from typing import Union
from typing import Tuple
from typing import List
def approximate_bounding_box_dyn_obstacles(obj: list, time_step=0) -> Union[
Tuple[list], None]:
"""
Compute bounding box of dynamic obstacles at time step
    :param obj: All possible objects. DynamicObstacles are filtered.
    :param time_step: time step at which the obstacle occupancies are evaluated
    :return: tuple of (x bounds, y bounds), or None if no bounds could be determined
"""
def update_bounds(new_point: np.ndarray, bounds: List[list]):
"""Update bounds with new point"""
if new_point[0] < bounds[0][0]:
bounds[0][0] = new_point[0]
if new_point[1] < bounds[1][0]:
bounds[1][0] = new_point[1]
if new_point[0] > bounds[0][1]:
bounds[0][1] = new_point[0]
if new_point[1] > bounds[1][1]:
bounds[1][1] = new_point[1]
return bounds
dynamic_obstacles_filtered = []
for o in obj:
if type(o) == DynamicObstacle:
dynamic_obstacles_filtered.append(o)
elif type(o) == Scenario:
dynamic_obstacles_filtered.extend(o.dynamic_obstacles)
x_int = [np.inf, -np.inf]
y_int = [np.inf, -np.inf]
bounds = [x_int, y_int]
shapely_set = None
for obs in dynamic_obstacles_filtered:
occ = obs.occupancy_at_time(time_step)
if occ is None:
continue
shape = occ.shape
if hasattr(shape, "_shapely_polygon"):
if shapely_set is None:
shapely_set = shape._shapely_polygon
else:
shapely_set = shapely_set.union(shape._shapely_polygon)
elif hasattr(shape, 'center'): # Rectangle, Circle
bounds = update_bounds(shape.center, bounds=bounds)
elif hasattr(shape, 'vertices'): # Polygon, Triangle
v = shape.vertices
bounds = update_bounds(np.min(v, axis=0), bounds=bounds)
bounds = update_bounds(np.max(v, axis=0), bounds=bounds)
    if shapely_set is not None:
        envelope_bounds = shapely_set.envelope.bounds
        envelope_bounds = np.array(envelope_bounds).reshape((2, 2))
        bounds = update_bounds(envelope_bounds[0], bounds)
        bounds = update_bounds(envelope_bounds[1], bounds)
if np.inf in bounds[0] or -np.inf in bounds[0] or np.inf in bounds[
1] or -np.inf in bounds[1]:
return None
else:
return tuple(bounds)
|
3a8fc28c2a47b50b9d0acc49f0818031e357fffa
| 19,744 |
from koala import KOALA_RSS # TODO: currently importing like this for workaround of circular imports
def sky_spectrum_from_fibres_using_file(
rss_file,
fibre_list=[],
win_sky=151,
n_sky=0,
skyflat="",
apply_throughput=True,
correct_ccd_defects=False,
fix_wavelengths=False,
sol=[0, 0, 0],
xmin=0,
xmax=0,
ymin=0,
ymax=0,
verbose=True,
plot=True,
):
"""
Parameters
----------
rss_file
fibre_list
win_sky
n_sky
skyflat
apply_throughput
correct_ccd_defects
fix_wavelengths
sol
xmin
xmax
ymin
ymax
verbose
plot
Returns
-------
"""
# Similar to in cube_alignement
# TODO: this function is never called it seems
if skyflat == "":
apply_throughput = False
plot_rss = False
else:
apply_throughput = True
plot_rss = True
if n_sky != 0:
sky_method = "self"
is_sky = False
if verbose:
print("\n> Obtaining 1D sky spectrum using {} lowest fibres in this rss ...".format(n_sky))
else:
sky_method = "none"
is_sky = True
if verbose:
print("\n> Obtaining 1D sky spectrum using fibre list = {} ...".format(fibre_list))
_test_rss_ = KOALA_RSS(
rss_file,
apply_throughput=apply_throughput,
skyflat=skyflat,
correct_ccd_defects=correct_ccd_defects,
fix_wavelengths=fix_wavelengths,
sol=sol,
sky_method=sky_method,
n_sky=n_sky,
is_sky=is_sky,
win_sky=win_sky,
do_extinction=False,
plot=plot_rss,
verbose=False,
)
if n_sky != 0:
print("\n> Sky fibres used: {}".format(_test_rss_.sky_fibres))
sky = _test_rss_.sky_emission
else:
sky = _test_rss_.plot_combined_spectrum(list_spectra=fibre_list, median=True)
if plot:
plt.figure(figsize=(14, 4))
if n_sky != 0:
plt.plot(_test_rss_.wavelength, sky, "b", linewidth=2, alpha=0.5)
ptitle = "Sky spectrum combining using {} lowest fibres".format(n_sky)
else:
for i in range(len(fibre_list)):
plt.plot(
_test_rss_.wavelength, _test_rss_.intensity_corrected[i], alpha=0.5
)
plt.plot(_test_rss_.wavelength, sky, "b", linewidth=2, alpha=0.5)
ptitle = "Sky spectrum combining " + np.str(len(fibre_list)) + " fibres"
plot_plot(_test_rss_.wavelength, sky, ptitle=ptitle)
print("\n> Sky spectrum obtained!")
return sky
|
e227a72f710c910685cad6941382dd44c7aacbe1
| 19,745 |
import numpy as np
from sklearn.metrics import accuracy_score
def binary_class_accuracy_score(y_pred, data):
"""LightGBM binary class accuracy-score function.
Parameters
----------
y_pred
LightGBM predictions.
data
LightGBM ``'Dataset'``.
Returns
-------
(eval_name, eval_result, is_higher_better)
``'eval_name'`` : string
is always 'accuracy' - the name of the metric
``'eval_result'`` : float
is the result of the metric
``'is_higher_better'`` : bool
is always 'True' because higher accuracy score is better
See Also
--------
* `sklearn.metrics.accuracy_score: <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html>`
* `LightGBM Training API: <https://lightgbm.readthedocs.io/en/latest/Python-API.html#training-api>`
"""
y_true = data.get_label()
y_pred = np.round(y_pred)
return 'accuracy', accuracy_score(y_true, y_pred), True
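# Illustrative usage sketch, not part of the original snippet: the function is meant to
# be passed as a custom eval metric (feval) to LightGBM's training API. The params and
# dataset names below are placeholders, so the call is left commented out.
# import lightgbm as lgb
# booster = lgb.train(params, train_set, valid_sets=[valid_set],
#                     feval=binary_class_accuracy_score)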
|
53f68931a96e3d32bed622dae09239ee2b96d762
| 19,746 |
def win32_clipboard_get():
    """ Get the current clipboard's text on Windows.
    Requires Mark Hammond's pywin32 extensions.
    """
    try:
        import win32clipboard
    except ImportError:
message = ("Getting text from the clipboard requires the pywin32 "
"extensions: http://sourceforge.net/projects/pywin32/")
raise Exception(message)
win32clipboard.OpenClipboard()
text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
# FIXME: convert \r\n to \n?
win32clipboard.CloseClipboard()
return text
|
9d4f95a46893c2a93ae0b1c48e5b7db72a2352a8
| 19,747 |
def is_prime(n):
    """Given an integer n, return True if n is prime and False if not.
    """
    if n < 2:
        return False
    return all(n % i for i in range(2, int(n ** 0.5) + 1))
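# Quick illustrative check, not part of the original snippet.
print([x for x in range(20) if is_prime(x)])  # [2, 3, 5, 7, 11, 13, 17, 19]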
|
17d2d7bdf95a9d3e037e911a3271688013413fb7
| 19,748 |
import os
def path_to_newname(path, name_level=1):
"""
Takes one path and returns a new name, combining the directory structure
with the filename.
Parameters
----------
path : String
name_level : Integer
Form the name using items this far back in the path. E.g. if
path = mydata/1234/3.txt and name_level == 2, then name = 1234_3
Returns
-------
name : String
"""
name_plus_ext = path.split('/')[-name_level:]
name, ext = os.path.splitext('_'.join(name_plus_ext))
return name
|
e0d8fc09a8809bf8dfee26e208570b0e3c5a4d02
| 19,749 |
import os
from keras.models import model_from_json
def load_model_from_json(model_path=None, weights_path=None):
"""
load dataset and weights from file
input:
model_path path to the model file, should be json format
weights_path path to the weights file, should be HDF5 format
output:
Keras model
"""
# default model path
home_path = os.path.abspath(".")
if model_path is None:
model_path = os.path.join(home_path, "resModel.json")
# default weights path
if weights_path is None:
weights_path = os.path.join(home_path, "modelWeights.h5")
# read json model file
json = None
with open(model_path, "r") as f:
json = f.read()
# load model
model = model_from_json(json)
# add weights to the model
model.load_weights(weights_path)
return model
|
615a5a51704ee64bf7a45ca1da091278cb0d4453
| 19,750 |
import sys
def update_parameters(parameters,grads,learning_rate,optimizer,beta1=0.9,beta2=0.999, epsilon=1e-8):
"""
Description:
        Updates the neural network's parameters (weights, biases) based on the optimizer selected
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
optimizer -- the optimizer information that tracks the optimizer and it's state.
Optional Arguments:
beta1 -- Exponential decay hyperparameter for the first moment estimates
-Used in: Momentum, ADAM
-Common values for beta1 range from 0.8 to 0.999. If you don't feel inclined to tune this, beta = 0.9
is often a reasonable default.
beta2 -- Exponential decay hyperparameter for the second moment estimates
-Used in: ADAM(RMS PROP)
epsilon -- hyperparameter preventing division by zero in Adam updates
-Used in ADAM(RMS PROP)
    Returns:
        parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
optimizer -- the optimizer information that tracks the optimizer and it's state.
"""
# Update parameters via GD
if optimizer["optimizer_type"] == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
    # Update parameters with Momentum
elif optimizer["optimizer_type"] == "momentum":
parameters, optimizer["v"] = update_parameters_with_momentum(parameters, grads, optimizer["v"], beta1,
learning_rate)
#update parameters with ADAM
elif optimizer["optimizer_type"] == "adam":
optimizer["t"] = optimizer["t"] + 1 # Adam counter for bias correction
parameters, optimizer["v"], optimizer["s"] = update_parameters_with_adam(parameters, grads, optimizer["v"],
optimizer["s"], optimizer["t"], learning_rate, beta1, beta2,
epsilon)
else:
print("ERROR: update_parameters - no optimizer_type was selected")
print("optimizer_type=" + optimizer["optimizer_type"])
sys.exit(1)
return parameters, optimizer
|
f9985935d214072340169d3b83958842efb6b838
| 19,751 |
import logging
def get_request_file():
"""
Method to implement REST API call of GET on address /file
"""
try:
content_file = open("html/file_get.html", "r")
content = content_file.read()
except:
logging.info("Could not load source HTML file '%s'")
raise
return content
|
7a084d5455b1797d851388ac40de23c5c35fb381
| 19,752 |
from collections import Counter
from typing import Iterable
def sock_merchant(arr: Iterable[int]) -> int:
"""
>>> sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20])
3
>>> sock_merchant([6, 5, 2, 3, 5, 2, 2, 1, 1, 5, 1, 3, 3, 3, 5])
6
"""
count = Counter(arr).values()
ret = sum(n // 2 for n in count)
return ret
|
1b3b8d37ccb3494ed774e26a41ebba32c87a632c
| 19,753 |
def new_user_registration(email: str) -> dict:
"""Alert the CIDC admin mailing list to a new user registration."""
subject = "New User Registration"
html_content = (
f"A new user, {email}, has registered for the CIMAC-CIDC Data Portal ({ENV}). If you are a CIDC Admin, "
"please visit the accounts management tab in the Portal to review their request."
)
email = {
"to_emails": [CIDC_MAILING_LIST],
"subject": subject,
"html_content": html_content,
}
return email
|
ee4c57e45d15b8e65bd8e702633d527bc2f6db1f
| 19,754 |
def article_detail():
"""文章详情"""
id = request.form.get('id')
if id is None:
raise Exception('ARTICLE_NOT_EXIST')
article = Article.find(id)
if article is None:
raise Exception('ARTICLE_NOT_EXIST')
    # Fetch the tags
if article.tags is None:
article.tags = []
else:
all_tags = Tag.find_all({'_id': {'$in': article.tags}})
all_tags = {str(tag._id): {'id': str(tag._id), 'name': tag.name} for tag in all_tags}
article.tags = [all_tags[str(id)] for id in article.tags if str(id) in all_tags]
return {'article': article.filter('title', 'draft', 'tags',
img=lambda article: images.url(article.img) if article.img else '')}
|
534cb77384b65cb4b88bedd1a82daeb93766714a
| 19,755 |
def add_state_names_column(my_df):
"""
Add a column of corresponding state names to a dataframe
Params (my_df) a DataFrame with a column called "abbrev" that has state abbreviations.
Return a copy of the original dataframe, but with an extra column.
"""
    new_df = my_df.copy()
    names_map = {"CA": "Cali", "CO": "Colorado", "CT": "Connecticut", "NJ": "New Jersey"}
    new_df["name"] = new_df["abbrev"].map(names_map)
    return new_df
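# Illustrative usage sketch, not part of the original snippet.
import pandas as pd
example_df = pd.DataFrame({"abbrev": ["CA", "NJ"]})
print(add_state_names_column(example_df))
#   abbrev        name
# 0     CA        Cali
# 1     NJ  New Jersey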
|
4a9eb49ef2cda11d8135eb33ec43d99422e067b6
| 19,756 |
from glob import glob
def list_subdir_paths(directory):
"""
Generates a list of subdirectory paths
:param directory: str pathname of target parent directory
:return: list of paths for each subdirectory in the target parent
directory
"""
subdir_paths = glob("{}/*/".format(directory))
return subdir_paths
|
df8ec80096b900ad8ceac3bc013fe47b21b4fd54
| 19,757 |
import math
from collections import defaultdict
def logic_method_with_bkg(plots_per_cycle, cycle_time, sigma_s=160, m=3, n=4):
"""
:param plots_per_cycle:
:param cycle_time:
:param sigma_s:
:param m:
:param n:
:return:
"""
N = plots_per_cycle.shape[0] # number of cycles
tracks = [] # ret
track_cnt = 0
    # Slide a window over the cycles
succeed = False
for i in range(2, N - n): # cycle i
if succeed:
break
        # Take the sliding window (5 consecutive cycles)
        window = slide_window(plots_per_cycle, n, start_cycle=i, skip_cycle=2)
        # ---------- m/n statistics over the window
        # Build the mapping chain
        K = min([cycle_plots.shape[0] for cycle_plots in window])  # minimum common number of plots
mappings = defaultdict(dict)
for j in range(len(window) - 1, 0, -1):
            # ----- Build the mapping between adjacent cycles
mapping = matching_plots_nn(window[j], window[j - 1], K)
# -----
if len(set(mapping.values())) != len(set(mapping.keys())):
break
else:
mappings[j] = mapping
        if len(mappings) < m:  # need valid data in at least m cycles, i.e. m-1 mappings
            continue  # slide on to the next window
        # Sort the mapping results (descending by key)
mappings = sorted(mappings.items(), key=lambda x: x[0], reverse=True)
# print(mappings)
        # Build tentative tracks
        for k in range(K):  # iterate over each tentative track
            # ----- Track state bookkeeping
            # Detection count within the window: kept separately for each tentative track
            n_pass = 0
            # Motion state records within the window: kept per track (velocity, acceleration, heading deflection angle)
window_states = defaultdict(dict)
# -----
            # ----- Collect the plots that make up the tentative track
            plot_ids = []
            id = -1
            # Take the k-th plot id of the first valid cycle in reverse order
            keys = mappings[0][1].keys()
            keys = sorted(keys, reverse=False)  # ascending by plot index of the latest valid cycle in the current window
id = keys[k]
plot_ids.append(id)
            # Propagate the plot ids of the remaining cycles along the mapping chain
            for (c, mapping) in mappings:  # mappings are already sorted by cycle in descending order
                id = mapping[id]  # follow the mapping chain backwards to the plot id
                plot_ids.append(id)
            # print(ids)  # ids are in descending cycle order
            # Link the plots according to the ids: the tentative track
            cycle_ids = [c for (c, mapping) in mappings]  # in descending cycle order
cycle_ids.extend([mappings[-1][0] - 1])
assert len(cycle_ids) == len(plot_ids)
plots = [window[cycle][plot_id]
for cycle, plot_id in zip(cycle_ids, plot_ids)]
# print(plots)
            # Gate tests, one plot at a time, within the window
# for l, (cycle_id, plot) in enumerate(zip(cycle_ids_to_test, plots_to_test)):
for l in range(len(plots) - 2):
cycle_id = cycle_ids[l]
                # Build the plots of three consecutive cycles
# plots_2 = [plots[l + 1], plots[l]]
plots_3 = [plots[l + 2], plots[l + 1], plots[l]]
# plot_plots(plots_2, [cycle_ids[l+1], cycle_ids[l]])
# plot_plots(plots_3, [cycle_ids[l+2], cycle_ids[l+1], cycle_ids[l]])
                # Estimate the motion state of the current plot
v, a, angle_in_radians = get_v_a_angle(plots_3, cycle_time)
# v = get_v(plots_2, cycle_time)
                # Estimate the heading deflection angle
angle_in_degrees = math.degrees(angle_in_radians)
angle_in_degrees = angle_in_degrees if angle_in_degrees >= 0.0 else angle_in_degrees + 360.0
angle_in_degrees = angle_in_degrees if angle_in_degrees <= 360.0 else angle_in_degrees - 360.0
                # Initial-gate check: j is the second scan of the current decision sequence
if start_gate_check(cycle_time, plots[l + 2], plots[l + 1], v0=340):
                    # --- For a track that passes the initial-gate check, keep the tentative track and go on to the correlation gate
                    # Correlation (tracking) gate check, pages 71-72
if relate_gate_check(cycle_time, v, a, plots[l + 2], plots[l + 1], plots[l], sigma_s=sigma_s):
n_pass += 1
                        # Record the motion state within the window
state_dict = {
'cycle': cycle_id,
'x': plots[l][0],
'y': plots[l][1],
'v': v,
'a': a,
'angle_in_degrees': angle_in_degrees
}
window_states[cycle_id] = state_dict
                        ## ----- Record the motion state of the first two plots in the window
if l == len(plots) - 2 - 1:
print('Add plot for the first 2 plots in the window...')
plots_2 = [plots[l + 1], plots[l]]
v = get_v(plots_2, cycle_time)
                            # Motion state record for plot #1 of the window
state_dict = {
'cycle': cycle_id - 1,
'x': plots[l + 1][0],
'y': plots[l + 1][1],
'v': v,
'a': -1,
'angle_in_degrees': -1
}
window_states[cycle_id - 1] = state_dict
                            # Motion state record for plot #0 of the window
state_dict = {
'cycle': cycle_id - 2,
'x': plots[l + 2][0],
'y': plots[l + 2][1],
'v': -1,
'a': -1,
'angle_in_degrees': -1
}
window_states[cycle_id - 2] = state_dict
else:
print('Track init failed @cycle{:d}, object(plot) is not in relating gate.'.format(i))
else:
print('Track init failed @cycle{:d} @window{:d}, object(plot) is not in the starting gate.'
.format(i, j))
            # Check whether the current track was initialized successfully
if n_pass >= m:
print(
'Track {:d} inited successfully @cycle {:d}.'.format(k, i))
                # ----- Initialize the track object
                track = Track()
                track.id_ = track_cnt  # track id
                track.state_ = 2  # track state: reliable (confirmed) track
                track.init_cycle_ = i  # track initiation cycle
                window_states = sorted(window_states.items(
                ), key=lambda x: x[0], reverse=False)  # re-sort in ascending order
                # Add the initialized plots
for k, v in window_states:
# print(k, v)
plot = Plot(v['cycle'], v['x'], v['y'],
v['v'], v['a'], v['angle_in_degrees'])
plot.state_ = 1 # 'Related'
plot.correlated_track_id_ = track.id_
track.add_plot(plot)
                    track.quality_counter_ += 1  # update the track quality score
tracks.append(track)
# -----
                # Update the track counter
                track_cnt += 1
                # Mark track initiation as successful
                succeed = True
                # Clear the window state
                window_states = defaultdict(dict)
                # Stop checking the current track and move on to the next tentative track
continue
return succeed, tracks
|
a77e8a9d116187dce674c33ca098012bb6b22363
| 19,758 |
from typing import Union
def bias_scan(
data: pd.DataFrame,
observations: pd.Series,
expectations: Union[pd.Series, pd.DataFrame] = None,
favorable_value: Union[str, float] = None,
overpredicted: bool = True,
scoring: Union[str, ScoringFunction] = "Bernoulli",
num_iters: int = 10,
penalty: float = 1e-17,
mode: str = "binary",
**kwargs,
):
"""
scan to find the highest scoring subset of records
:param data (dataframe): the dataset (containing the features) the model was trained on
:param observations (series): ground truth (correct) target values
:param expectations (series, dataframe, optional): pandas series estimated targets
as returned by a model for binary, continuous and ordinal modes.
If mode is nominal, this is a dataframe with columns containing expectations for each nominal class.
If None, model is assumed to be a dumb model that predicts the mean of the targets
or 1/(num of categories) for nominal mode.
    :param favorable_value(str, float, optional): Should be 'high', 'low', or a float if the mode is binary, ordinal, or continuous.
            If float, the value has to be the minimum or maximum of the observations column. Defaults to high if None for these modes.
            Support for float is kept to keep the intuition clear in binary classification tasks.
If mode is nominal, favorable values should be one of the unique categories in the observations.
Defaults to a one-vs-all scan if None for nominal mode.
:param overpredicted (bool, optional): flag for group to scan for.
True means we scan for a group whose expectations/predictions are systematically higher than observed.
        In other words, True means we scan for a group whose observed is systematically lower than the expectations.
False means we scan for a group whose expectations/predictions are systematically lower than observed.
In other words, False means we scan for a group whose observed is systematically higher than the expectations.
:param scoring (str or class): One of 'Bernoulli', 'Gaussian', 'Poisson', or 'BerkJones' or subclass of
:class:`aif360.metrics.mdss.ScoringFunctions.ScoringFunction`.
:param num_iters (int, optional): number of iterations (random restarts). Should be positive.
    :param penalty (float, optional): penalty term. Should be positive. The penalty term, as with any regularization parameter, may need to be
            tuned for one's use case. The higher the penalty, the less complex (number of features and feature values) the
highest scoring subset that gets returned is.
:param mode: one of ['binary', 'continuous', 'nominal', 'ordinal']. Defaults to binary.
In nominal mode, up to 10 categories are supported by default.
To increase this, pass in keyword argument max_nominal = integer value.
:returns: the highest scoring subset and the score or dict of the highest scoring subset and the score for each category in nominal mode
"""
# Ensure correct mode is passed in.
modes = ["binary", "continuous", "nominal", "ordinal"]
assert mode in modes, f"Expected one of {modes}, got {mode}."
# Set correct favorable value (this tells us if higher or lower is better)
min_val, max_val = observations.min(), observations.max()
uniques = list(observations.unique())
if favorable_value == 'high':
favorable_value = max_val
elif favorable_value == 'low':
favorable_value = min_val
elif favorable_value is None:
if mode in ["binary", "ordinal", "continuous"]:
favorable_value = max_val # Default to higher is better
elif mode == "nominal":
favorable_value = "flag-all" # Default to scan through all categories
assert favorable_value in [
"flag-all",
*uniques,
], f"Expected one of {uniques}, got {favorable_value}."
assert favorable_value in [
min_val,
max_val,
"flag-all",
*uniques,
], f"Favorable_value should be high, low, or one of categories {uniques}, got {favorable_value}."
    # Set appropriate direction for scanner depending on mode and overpredicted flag
if mode in ["ordinal", "continuous"]:
if favorable_value == max_val:
kwargs["direction"] = "negative" if overpredicted else "positive"
else:
kwargs["direction"] = "positive" if overpredicted else "negative"
else:
kwargs["direction"] = "negative" if overpredicted else "positive"
# Set expectations to mean targets for non-nominal modes
if expectations is None and mode != "nominal":
expectations = pd.Series(observations.mean(), index=observations.index)
# Set appropriate scoring function
if scoring == "Bernoulli":
scoring = Bernoulli(**kwargs)
elif scoring == "BerkJones":
scoring = BerkJones(**kwargs)
elif scoring == "Gaussian":
scoring = Gaussian(**kwargs)
elif scoring == "Poisson":
scoring = Poisson(**kwargs)
else:
scoring = scoring(**kwargs)
if mode == "binary": # Flip observations if favorable_value is 0 in binary mode.
observations = pd.Series(observations == favorable_value, dtype=int)
elif mode == "nominal":
unique_outs = set(sorted(observations.unique()))
size_unique_outs = len(unique_outs)
        if expectations is not None:  # Check that the expectation columns match the observed categories
expectations_cols = set(sorted(expectations.columns))
assert (
unique_outs == expectations_cols
), f"Expected {unique_outs} in expectation columns, got {expectations_cols}"
else:
expectations = pd.Series(
1 / observations.nunique(), index=observations.index
)
max_nominal = kwargs.get("max_nominal", 10)
assert (
size_unique_outs <= max_nominal
        ), f"Nominal mode only supports up to {max_nominal} labels, got {size_unique_outs}. Use keyword argument max_nominal to increase the limit."
if favorable_value != "flag-all": # If favorable flag is set, use one-vs-others strategy to scan, else use one-vs-all strategy
observations = observations.map({favorable_value: 1})
observations = observations.fillna(0)
if isinstance(expectations, pd.DataFrame):
expectations = expectations[favorable_value]
else:
results = {}
orig_observations = observations.copy()
orig_expectations = expectations.copy()
for unique in uniques:
observations = orig_observations.map({unique: 1})
observations = observations.fillna(0)
if isinstance(expectations, pd.DataFrame):
expectations = orig_expectations[unique]
scanner = MDSS(scoring)
result = scanner.scan(
data, expectations, observations, penalty, num_iters, mode=mode
)
results[unique] = result
return results
scanner = MDSS(scoring)
return scanner.scan(data, expectations, observations, penalty, num_iters, mode=mode)
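# Illustrative usage sketch (not part of the original function). It assumes the
# same environment as above (pandas as pd plus the MDSS scoring classes); the
# column names and values below are made up for demonstration only.
if __name__ == "__main__":
    demo_data = pd.DataFrame({"age_group": ["young", "old", "young", "old"],
                              "region": ["north", "north", "south", "south"]})
    demo_obs = pd.Series([1, 0, 1, 0])          # ground truth labels
    demo_exp = pd.Series([0.6, 0.6, 0.4, 0.4])  # model predicted probabilities
    subset, score = bias_scan(demo_data, demo_obs, demo_exp,
                              overpredicted=True, scoring="Bernoulli",
                              num_iters=5, mode="binary")
    print(subset, score)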
|
735ccc6c3054e981a7aee9681d892e226316ed41
| 19,759 |
def int_from_bin_list(lst):
"""Convert a list of 0s and 1s into an integer
Args:
lst (list or numpy.array): list of 0s and 1s
Returns:
int: resulting integer
"""
return int("".join(str(x) for x in lst), 2)
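# Minimal usage sketch (added for illustration): [1, 0, 1, 1] is binary 1011 = 11.
if __name__ == "__main__":
    assert int_from_bin_list([1, 0, 1, 1]) == 11
    assert int_from_bin_list([1, 0, 0]) == 4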
|
a41b2578780019ed1266442d76462fb89ba2a0fb
| 19,760 |
def validate_array_input(arr, dtype, arr_name):
"""Check if array has correct type and is numerical.
This function checks if the input is either a list, numpy.ndarray or
pandas.Series of numerical values, converts it to a numpy.ndarray and
throws an error in case of incorrect data.
Args:
arr: Array of data
dtype: One of numpy's dtypes
        arr_name: String specifying the variable name, so that the error
            message can be adapted correctly.
    Returns:
        The array converted to a numpy.ndarray with the datatype
        specified in the input argument.
    Raises:
        ValueError: In case non-numerical data is passed
        TypeError: If the input is neither a list, a numpy.ndarray nor a
            pandas.Series
"""
# Check for correct data type
if isinstance(arr, (list, np.ndarray, pd.Series)):
# Try to convert as numpy array
try:
arr = np.array(arr, dtype=dtype).flatten()
except:
msg = ["The data in the parameter array '{}'".format(arr_name),
" must be purely numerical."]
raise ValueError("".join(msg))
else:
msg = ["The array {} must be either a list, ".format(arr_name),
"numpy.ndarray or pandas.Series"]
raise TypeError("".join(msg))
# return converted array
return arr
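# Usage sketch (illustrative only): valid input is converted and flattened,
# non-numerical input raises ValueError with a message naming the argument.
if __name__ == "__main__":
    print(validate_array_input([1, 2, 3], np.float64, "durations"))  # -> [1. 2. 3.]
    try:
        validate_array_input(["a", "b"], np.float64, "durations")
    except ValueError as err:
        print(err)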
|
72829dad46aa6e5054cd0d49ff0206083781bddd
| 19,761 |
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
def ClassifyBehavior(data, bp_1="snout",bp_2="ear_L", bp_3="ear_R", bp_4="tail", dimensions = 2,distance=28,**kwargs):
"""
Returns an array with the cluster by frame, an array with the embedding data in low-dimensional
space and the clusterization model.
Parameters
----------
data : pandas DataFrame
The input tracking data.
bp_1 : str
Body part representing snout.
bp_2 : str
Body part representing left ear.
bp_3 : str
Body part representing right ear.
bp_4 : str
Body part representing tail.
dimensions : int
Dimension of the embedded space.
distance : int
The linkage distance threshold above which, clusters will not be merged.
startIndex : int, optional
Initial index.
n_jobs : int, optional
The number of parallel jobs to run for neighbors search.
verbose : int, optional
Verbosity level.
perplexity : float, optional
The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.
Returns
-------
cluster_labels : array
Array with the cluster by frame.
X_transformed : array
Embedding of the training data in low-dimensional space.
model : Obj
AgglomerativeClustering model.
See Also
--------
For more information and usage examples: https://github.com/pyratlib/pyrat
Notes
-----
This function was developed based on DLC outputs and is able to support
matplotlib configurations."""
startIndex = kwargs.get('startIndex')
n_jobs = kwargs.get('n_jobs')
verbose = kwargs.get('verbose')
perplexity = kwargs.get("perplexity")
if type(startIndex) == type(None):
startIndex = 0
if type(n_jobs) == type(None):
n_jobs=-1
if type(verbose) == type(None):
verbose=0
if type(perplexity) == type(None):
perplexity=500
    values = (data.iloc[2:,1:].values).astype(float)
lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
nose = np.concatenate(((values[:,lista1.index(bp_1+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_1+" - y")]).reshape(1,-1).T), axis=1)
earr = np.concatenate(((values[:,lista1.index(bp_2+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_2+" - y")]).reshape(1,-1).T), axis=1)
earl = np.concatenate(((values[:,lista1.index(bp_3+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_3+" - y")]).reshape(1,-1).T), axis=1)
tail = np.concatenate(((values[:,lista1.index(bp_4+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_4+" - y")]).reshape(1,-1).T), axis=1)
bodyparts = [nose, earr, earl, tail]
distances = []
for k in range(len(bodyparts[0])):
frame_distances = []
for i in range(len(bodyparts)):
distance_row = []
for j in range( len(bodyparts) ):
distance_row.append(np.linalg.norm(bodyparts[i][k] - bodyparts[j][k]))
frame_distances.append(distance_row)
distances.append(frame_distances)
distances2 = np.asarray(distances)
    for i in range(4):
        for k in range(4):
            distances2[:, i, k] = distances2[:, i, k]/np.max(distances2[:, i, k])
d = []
for i in range(distances2.shape[0]):
d.append(distances2[i, np.triu_indices(4, k = 1)[0], np.triu_indices(4, k = 1)[1]])
d = StandardScaler().fit_transform(d)
embedding = TSNE(n_components=dimensions, n_jobs=n_jobs, verbose=verbose, perplexity=perplexity)
X_transformed = embedding.fit_transform(d[startIndex:])
model = AgglomerativeClustering(n_clusters=None,distance_threshold=distance)
model = model.fit(d[startIndex:])
cluster_labels = model.labels_
return cluster_labels, X_transformed, model
|
002e11bd0b6050fcfa8b50df0f0b24a3cc36bed7
| 19,762 |
def grab_inputs(board):
"""
Asks for inputs and returns a row, col. Also updates the board state.
"""
keepasking = True
while keepasking:
try:
            row = int(input("Input row "))
col = int(input("Input column "))
except (EOFError, KeyboardInterrupt):
print('Cya nerd')
exit()
except:
            print("That's not an integer.")
else: # If it's an int
valid_board = board.update_board(row, col)
if valid_board == False:
print("Your row or col is out of range. Try ranges 0-2 and make sure there's nothing there already.")
else: # If it's a valid board
keepasking = False
return row, col
|
0fb840348ff645d9f2a48e1c028d99bff0bf31fe
| 19,763 |
def start_session():
""" This function is what initializes the application."""
welcome_msg = render_template('welcome')
return question(welcome_msg)
|
ce666a48f078e49a0df98b5087c71cb1e548e905
| 19,764 |
def solve(filename):
"""
Run a sample, do the analysis and store a program to apply to a test case
"""
arc = Arc(filename)
arc.print_training_outputs()
return arc.solve()
|
2a23021bb31508fd67c4be178684bb2da7d1d7c9
| 19,765 |
from typing import Sequence
def extract_item(item, prefix=None, entry=None):
"""a helper function to extract sequence, will extract values from
a dicom sequence depending on the type.
    Parameters
    ==========
    item: an item from a sequence.
    prefix: optional string prefix, joined with "__" to build nested keys.
    entry: dictionary of extracted values to update; a new one is created if None.
    """
# First call, we define entry to be a lookup dictionary
if entry is None:
entry = {}
# Skip raw data elements
if not isinstance(item, RawDataElement):
header = item.keyword
# If there is no header or field, we can't evaluate
if header in [None, ""]:
return entry
if prefix is not None:
header = "%s__%s" % (prefix, header)
value = item.value
if isinstance(value, bytes):
value = value.decode("utf-8")
if isinstance(value, Sequence):
return extract_sequence(value, prefix=header)
entry[header] = value
return entry
|
a4c8c99bcd54baefdbaa95469bb3a289c2811cfc
| 19,766 |
def route_counts(session, origin_code, dest_code):
""" Get count of flight routes between origin and dest. """
routes = session.tables["Flight Route"]
# airports = session.tables["Reporting Airport"]
# origin = airports["Reporting Airport"] == origin_code
origin = SelectorClause(
"Reporting Airport", REPORTING_AIRPORT_CODE, [origin_code], session=session
)
dest = routes["Origin Destination"] == dest_code
audience = routes * origin & dest
return audience.select().count
|
ad35a36b6874bcf45107d7217acdb6bae097b305
| 19,767 |
from pathlib import Path
def generate_master_bias(
science_frame : CCDData,
bias_path : Path,
use_cache : bool=True
    ) -> CCDData:
    """
    Create (or load from cache) the master bias frame for the given science frame.
    """
cache_path = generate_cache_path(science_frame, bias_path) / 'bias'
cache_file = cache_path / 'master.fits'
if use_cache and cache_file.is_file():
ccd = CCDData.read(cache_file)
if ccd is not None:
return ccd
cache_path.mkdir(parents=True, exist_ok=True)
ccd = calibrate_bias(science_frame, bias_path)
if ccd is not None:
ccd.write(cache_file)
return ccd
|
207ca95109694d16e154088c7e3a12880f01d037
| 19,768 |
def RetryOnException(retry_checker,
max_retries,
sleep_multiplier=0,
                     retry_backoff_factor=1):
    """Decorator which retries the function call if |retry_checker| returns True.
Args:
retry_checker: A callback function which should take an exception instance
and return True if functor(*args, **kwargs) should be retried
when such exception is raised, and return False if it should
not be retried.
max_retries: Maximum number of retries allowed.
sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
retry_backoff_factor is 1. Will sleep
sleep_multiplier * (
retry_backoff_factor ** (attempt_count - 1))
if retry_backoff_factor != 1.
retry_backoff_factor: See explanation of sleep_multiplier.
Returns:
The function wrapper.
"""
def _Wrapper(func):
def _FunctionWrapper(*args, **kwargs):
return Retry(retry_checker, max_retries, func, sleep_multiplier,
retry_backoff_factor, *args, **kwargs)
return _FunctionWrapper
return _Wrapper
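# Usage sketch (illustrative only; relies on the Retry helper referenced above).
# The decorated call is retried up to 3 times whenever a ValueError is raised,
# sleeping 1s, 2s, 3s between attempts.
@RetryOnException(lambda exc: isinstance(exc, ValueError),
                  max_retries=3, sleep_multiplier=1)
def _fetch_flaky_resource(url):
    # hypothetical flaky operation; replace with a real network/device call
    raise ValueError("transient failure for %s" % url)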
|
a721e14c7d5d98e2151f4108dcff18cb0de225e3
| 19,769 |
import csv
import itertools
def ParseCsvFile(fp):
"""Parse dstat results file in csv format.
Args:
      fp: file object. An open dstat results file in csv format.
Returns:
A tuple of list of dstat labels and ndarray containing parsed data.
"""
reader = csv.reader(fp)
headers = list(itertools.islice(reader, 5))
if len(headers) != 5:
raise ValueError(
'Expected exactly 5 header lines got {}\n{}'.format(
len(headers), headers))
if 'Dstat' not in headers[0][0]:
raise ValueError(
'Expected first header cell to contain "Dstat"\n{}'.format(
headers[0]))
if 'Host:' not in headers[2][0]:
raise ValueError(('Expected first cell in third line to be '
'"Host:"\n{}').format(headers[2]))
categories = next(reader)
# Categories are not repeated; copy category name across columns in the
# same category
for i, category in enumerate(categories):
if not categories[i]:
categories[i] = categories[i - 1]
labels = next(reader)
if len(labels) != len(categories):
raise ValueError((
'Number of categories ({}) does not match number of '
'labels ({})\nCategories: {}\nLabels:{}').format(
len(categories), len(labels), categories, labels))
# Generate new column names
labels = ['%s__%s' % x for x in zip(labels, categories)]
data = []
for i, row in enumerate(reader):
# Remove the trailing comma
if len(row) == len(labels) + 1:
if row[-1]:
raise ValueError(('Expected the last element of row {0} to be empty,'
' found {1}').format(row, row[-1]))
row = row[:-1]
if len(labels) != len(row):
raise ValueError(('Number of labels ({}) does not match number of '
'columns ({}) in row {}:\n{}').format(
len(labels), len(row), i, row))
data.append(row)
return labels, np.array(data, dtype=float)
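# Usage sketch (illustrative only): drive the parser with an in-memory, dstat-like
# CSV. The exact header layout below is an assumption and may differ between
# dstat versions; only the checks performed above are guaranteed to be exercised.
if __name__ == "__main__":
    import io
    sample = "\n".join([
        '"Dstat 0.7.3 CSV output"',
        '"Author:","example"',
        '"Host:","example",,,,"User:","root"',
        '"Cmdline:","dstat --output out.csv"',
        '"Date:","01 Jan 2020"',
        '"total cpu usage",,"memory usage",',
        '"usr","sys","used","free"',
        '1.0,2.0,3.0,4.0',
        '5.0,6.0,7.0,8.0',
    ]) + "\n"
    labels, data = ParseCsvFile(io.StringIO(sample))
    print(labels)       # e.g. ['usr__total cpu usage', ..., 'free__memory usage']
    print(data.shape)   # (2, 4)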
|
39381d0f9eaab1ab139d4d660257aeaec6e765ca
| 19,770 |
def uid_to_device_name(uid):
"""
Turn UID into its corresponding device name.
"""
return device_id_to_name(uid_to_device_id(uid))
|
e4ec879bb1619fd1e215c94084117b3ce6b237bc
| 19,771 |
def zonal_convergence(u, h, dx, dy, dy_u, ocean_u):
"""Compute convergence of zonal flow.
Returns -(hu)_x taking account of the curvature of the grid.
"""
res = create_var(u.shape)
for j in range(u.shape[-2]):
for i in range(u.shape[-1]):
res[j, i] = (-1) * (
h[j, cx(i + 1)] * u[j, cx(i + 1)] * dy_u[j, cx(i + 1)] * ocean_u[j, cx(i + 1)]
- h[j, i] * u[j, i] * dy_u[j, i] * ocean_u[j, i]
) / (dx[j, i] * dy[j, i])
return res
|
42a0ee78e0c4d8f78a600a9dd72aa03aa104f560
| 19,772 |
def filterPoints(solutions, corners):
    """Remove solutions if they are not within the perimeter.
    This function uses shapely, as the mathematical computations for non-rectangular
    shapes are quite heavy.
Args:
solutions: A list of candidate points.
corners: The perimeter of the garden (list of LEDs).
Returns:
A list of points filtered.
"""
coords = []
for i in corners:
if i.inPerimeter:
coords.append((i.point.X, i.point.Y))
polygon = shapely.geometry.polygon.Polygon(coords)
solutions_2 = [value.toShapely() for value in solutions
if polygon.contains(value.toShapely())]
return [Point(v.x, v.y) for v in solutions_2]
|
55c6e824d46e934eb30c6ecca45f516f09f0bff2
| 19,773 |
from typing import Set
def get_migrations_from_old_config_key_startswith(old_config_key_start: str) -> Set[AbstractPropertyMigration]:
"""
Get all migrations where old_config_key starts with given value
"""
ret = set()
for migration in get_history():
if isinstance(migration, AbstractPropertyMigration) and \
migration.old_config_key and \
migration.old_config_key.startswith(old_config_key_start):
ret.add(migration)
return ret
|
c8224af6e9a675ed940bf61de984a8dd01f634d5
| 19,774 |
def bbox_mapping(bboxes,
img_shape,
scale_factor,
flip,
flip_direction, # ='horizontal',
tile_offset):
"""Map bboxes from the original image scale to testing scale."""
new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
# add by hui ############################################
assert tile_offset is None or (isinstance(tile_offset, (tuple, list)) and len(tile_offset) == 2), \
"tile_offset must be None or (dx, dy) or [dx, dy]"
if tile_offset is not None:
dx, dy = tile_offset
new_bboxes[:, [0, 2]] -= dx
new_bboxes[:, [1, 3]] -= dy
h, w, c = img_shape
new_bboxes[:, [0, 2]] = new_bboxes[:, [0, 2]].clamp(0, w - 1)
new_bboxes[:, [1, 3]] = new_bboxes[:, [1, 3]].clamp(0, h - 1)
W, H = new_bboxes[:, 2] - new_bboxes[:, 0], new_bboxes[:, 3] - new_bboxes[:, 1]
keep = (W >= 2) & (H >= 2)
new_bboxes = new_bboxes[keep]
# #################################################################
return new_bboxes
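# Usage sketch (illustrative values only; bbox_flip is exercised only when
# flip=True, so it is not needed for this example).
if __name__ == "__main__":
    import torch
    boxes = torch.tensor([[10., 20., 50., 80.]])
    mapped = bbox_mapping(boxes, img_shape=(100, 200, 3), scale_factor=0.5,
                          flip=False, flip_direction='horizontal',
                          tile_offset=(5, 5))
    print(mapped)  # boxes scaled by 0.5, shifted by the tile offset and clipped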
|
a5fb8283eb6c379ef516db3a72c50d34c58ea8e6
| 19,775 |
import os
def getRNA_X(sample_list, DATAPATH, ctype, lab_type):
"""
Get X for RNA. The required columns are retained and all other rows and
columns dropped. This function also labels the data for building models.
Parameters
----------
sample_list : list
List of tumour samples to be retained.
DATAPATH : str
Complete path to SNV data for the samples and other data for different
        labelling techniques.
ctype : str
Cancer-type.
lab_type : str
        Labelling strategy to be used.
Returns
-------
data : DataFrame
DataFrame containing feature matrix to be trained on and labels.
data_meta : DataFrame
        DataFrame containing meta data for the feature matrix.
"""
# Load SNV data (for labelling)
os.chdir(DATAPATH + "/GDC_{}/SNV".format(ctype))
fname="{}_snv.tsv".format(ctype)
snv_lab = pd.read_csv(fname, sep="\t", header=0)
snv_lab.Tumor_Sample_Barcode = [samp[:16] for samp in snv_lab.Tumor_Sample_Barcode]
snv_lab = snv_lab[snv_lab.Tumor_Sample_Barcode.isin(sample_list)]
snv_lab.index = ["{};{}".format(samp[:16], gene) for samp, gene in zip(snv_lab.Tumor_Sample_Barcode, snv_lab.Hugo_Symbol)]
# Add labels
if lab_type == "civic":
snv_lab = snv.getCivicLabels(snv_lab, DATAPATH)
if lab_type == "martellotto":
snv_lab = snv.getMartelottoLabels(snv_lab, DATAPATH)
if lab_type == "cgc":
snv_lab = snv.getCGCLabels(snv_lab, DATAPATH)
if lab_type == "bailey":
snv_lab = snv.getBaileyLabels(snv_lab, DATAPATH, ctype)
# Remove duplicates and keep labelled data_snp
snv_lab = snv_lab[snv_lab.Label != "Unlabelled"]
snv_lab = snv_lab[~snv_lab.index.duplicated()]
# load data
path_network = DATAPATH + "/network"
data = [None] * len(sample_list)
datapath = DATAPATH + "/GDC_{}/RNA-seq".format(ctype)
for idx, file in enumerate(sample_list):
temp = getRNAFeatures(datapath, file, ctype, path_network, n=1)
# Assign labels to RNA data
temp["Label"] = [snv_lab.loc[idx, "Label"] if idx in snv_lab.index else "Unlabelled" for idx in temp.index]
temp = temp[temp["Label"] != "Unlabelled"]
# Drop nan rows
data[idx] = temp.dropna(axis=0)
# Concat data
data = pd.concat(data)
# Define meta-data and drop meta-data columns from RNA data
data_meta = data[['genes', 'Tumor_Sample_Barcode', 'Label']]
data_meta.index = data.index
d_cols = [x for x in data.columns if x in ['genes', 'unshrunk.logFC',
'PValue', 'FDR',
'Tumor_Sample_Barcode']]
data = data.drop(d_cols, axis=1)
return (data, data_meta)
|
e1ea6b4472f0f899db0fbeeb1ed150208dbb7888
| 19,776 |
import torch
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
"""Convert 3x4 rotation matrix to 4d quaternion vector
This algorithm is based on algorithm described in
https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
Args:
rotation_matrix (Tensor): the rotation matrix to convert.
Return:
Tensor: the rotation in quaternion
Shape:
- Input: :math:`(N, 3, 4)`
- Output: :math:`(N, 4)`
Example:
>>> input = torch.rand(4, 3, 4) # Nx3x4
>>> output = tgm.rotation_matrix_to_quaternion(input) # Nx4
"""
if not torch.is_tensor(rotation_matrix):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if len(rotation_matrix.shape) > 3:
raise ValueError(
"Input size must be a three dimensional tensor. Got {}".format(
rotation_matrix.shape))
if not rotation_matrix.shape[-2:] == (3, 4):
raise ValueError(
"Input size must be a N x 3 x 4 tensor. Got {}".format(
rotation_matrix.shape))
rmat_t = torch.transpose(rotation_matrix, 1, 2)
mask_d2 = rmat_t[:, 2, 2] < eps
mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
t0_rep = t0.repeat(4, 1).t()
t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
t1_rep = t1.repeat(4, 1).t()
t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
t2_rep = t2.repeat(4, 1).t()
t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
t3_rep = t3.repeat(4, 1).t()
mask_c0 = mask_d2 * mask_d0_d1
# mask_c1 = mask_d2 * (1 - mask_d0_d1)
mask_c1 = mask_d2 * (~mask_d0_d1)
# mask_c2 = (1 - mask_d2) * mask_d0_nd1
mask_c2 = (~mask_d2) * mask_d0_nd1
# mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)
mask_c3 = (~mask_d2) * (~mask_d0_nd1)
mask_c0 = mask_c0.view(-1, 1).type_as(q0)
mask_c1 = mask_c1.view(-1, 1).type_as(q1)
mask_c2 = mask_c2.view(-1, 1).type_as(q2)
mask_c3 = mask_c3.view(-1, 1).type_as(q3)
q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa
t2_rep * mask_c2 + t3_rep * mask_c3) # noqa
q *= 0.5
return q
|
3198dcd9f7a058a54be0d607cc66f543ea8e46f8
| 19,777 |
import calendar
import datetime
def get_first_day_and_last_day_by_month(months=0):
    """Get the date of the first day and the date of the last day of a given month.
    :param months: int, negative means months in the past, positive means months in the future
    :return tuple: (date of the first day of the month, date of the last day of the month)
    """
day = get_today() + relativedelta(months=months)
year = day.year
month = day.month
    # Get the weekday of the first day of the month and the total number of days in that month
_, month_range = calendar.monthrange(year, month)
first = datetime.date(year=year, month=month, day=1)
last = datetime.date(year=year, month=month, day=month_range)
return first, last
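# Usage sketch (illustrative only; relies on the get_today/relativedelta helpers
# referenced above): date ranges for last month and for the current month.
if __name__ == "__main__":
    print(get_first_day_and_last_day_by_month(months=-1))
    print(get_first_day_and_last_day_by_month())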
|
1aea5aa0c1abcc8382212315f0e34cf0f33968b9
| 19,778 |
def kmeans(X, C):
    """Lloyd's algorithm for the k-centers problem.
X : data matrix
C : initial centers
"""
C = C.copy()
V = np.zeros(C.shape[0])
for x in X:
idx = np.argmin(((C - x)**2).sum(1))
V[idx] += 1
eta = 1.0 / V[idx]
C[idx] = (1.0 - eta) * C[idx] + eta * x
return C
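# Usage sketch (illustrative only): refine two randomly chosen initial centers
# on toy 2-D data drawn from two well separated blobs.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    C0 = X[rng.choice(len(X), size=2, replace=False)]
    print(kmeans(X, C0))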
|
3006c10bf9091a39f4808781e4b484fc24f2ae3f
| 19,779 |
from re import DEBUG
def gap_init(points, D, d, C, L=None, st=None, K=None, minimize_K=True,
find_optimal_seeds=True,
seed_method="cones",
seed_edge_weight_type='EUC_2D',
use_adaptive_L_constraint_weights=True,
increase_K_on_failure=False):
#REMOVEME, disable!
#increase_K_on_failure=True):
""" An implementation of a three phase cluster-first-route-second CVRP
construction / route initialization algorithm. The first two phases involve
the clustering. First, a seed point is generated for each route, which is
then used in approximating customer node service costs in solving
generalized assignment problem (GAP) relaxation of the VRP. The resulting
assignments are then routed using a TSP solver. The algorithm has been
first proposed in (Fisher and Jaikumar 1981).
The algorithm assumes that the problem is planar and this implementation
allows seed in two ways:
* seed_method="cones", the initialization method of Fisher and Jaikumar
(1981) which can be described as Sweep with fractional distribution of
customer demand and placing the seed points approximately to the center
of demand mass of created sectors.
    * seed_method="kmeans", initialize seed points to k-means cluster centers.
* seed_method="large_demands", according to Fisher and Jaikumar (1981)
"Customers for which d_i > 1/2 C can also be made seed customers".
However applying this rule relies on human operator who then decides
the intuitively best seed points. This implementation selects the
      seed points satisfying the criterion d_i>mC, where m is the fractional
      capacity multiplier, that are farthest from the depot and each other.
The m is made iteratively smaller if there are no at least K seed point
candidates.
    * seed_method="ends_of_thoroughfares", this option was described in
(Fisher and Jaikumar 1981) as "Most distant customers at the end of
thoroughfares leaving from the depot are natural seed customers". They
relied on human operator. To automate this selection we make a
DBSCAN clustering with eps = median 2. nearest neighbor of all nodes
and min_samples of 3.
The other parameters are:
* points is a list of x,y coordinates of the depot [0] and the customers.
* D is a numpy ndarray (or equvalent) of the full 2D distance matrix.
including the service times (st/2.0 for leaving and entering nodes).
* d is a list of demands. d[0] should be 0.0 as it is the depot.
* C is the capacity constraint limit for the identical vehicles.
* L is the optional constraint for the maximum route length/duration/cost.
* st is the service time. However, also the D should be modified with
service times to allow straight computation of the TSP solutions (see
above)
* K is the optional parameter specifying the required number of vehicles.
The algorithm is only allowed to find solutions with this many vehicles.
* minimize_K, if set to True (default), makes the minimum number of routes
the primary and the solution cost the secondary objective. If set False
      the algorithm optimizes for minimum solution / route cost by increasing
K as long as it seems beneficial. WARNING: the algorithm suits this use
      case (cost as the objective) poorly and setting this option to False may
significantly increase the required CPU time.
* find_optimal_seeds if set to True, tries all possible Sweep start
positions / k-Means with N different seeds. If False, only one sweep
from the node closest to the depot is done / k-Means clustering is done
only once with one random seed value.
* seed_edge_weight_type specifies how to round off the distances from the
customer nodes (points) to the seed points. Supports all TSPLIB edge
weight types.
Note1: The GAP is optimized using Gurobi solver. If L constraint is set,
the side constraints may make the GAP instance tricky to solve and it
is advisable to set a sensible timeout with config.MAX_MIP_SOLVER_RUNTIME
* use_adaptive_L_constraint_weights if set True, and the L constraint is
set, the algorithm adaptively adjusts the route cost approximation of the
relevant side constraints so that a solution which is not L infeasible or
GAP infeasible is found. The exact handling of L consraint is vague in
(Fisher and Jaikumar 1981) and this was our best guess on how the
feasible region of the problem can be found. Note that if GAP solver is
terminated due to a timeout, the adaptive multipier is increased and
GAP solution is attempted again. However, if increase_K_on_failure is set,
(see below) it takes priority over this.
* increase_K_on_failure (default False) is another countermeasure against
long running GAP solving attempts for problem instances without L
constraint (if there is L constraint, and use_adaptive_L_constraint_-
weights is enabled, this is ignored) or instances where K estimation
does not work and it takes excessively long time to check all initial
seed configurations before increasing K. If Gurobi timeout is encountered
or the solution is GAP infeasible, and this option is enabled, the K is
temporately increased, new seeds points generated for current sweep start
location and another GAP solution attempt is made. K is allowed to
increased temporarely up to 10% of the mimimum K allowed (or 1, whichever
is larger).
Note2: logger controls the debug level but running the script with
Python -O option disables all debug output.
Fisher, M. L. and Jaikumar, R. (1981), A generalized assignment heuristic
for vehicle routing. Networks, 11: 109-124. doi:10.1002/net.3230110205
""" #TODO: other alternatives
# customers with maximum demand or most distant customer from origin
if seed_method=="cones":
seed_f = _sweep_seed_points
if seed_method=="kmeans":
seed_f = _kmeans_seed_points
if seed_method=="large_demands":
if not C: raise ValueError("""The "large_demands" seed initialization method requires demands and C constraint to be known.""")
seed_f = _large_demand_seed_points
if seed_method=="ends_of_thoroughfares":
seed_f = _end_of_thoroughfares_seed_points
int_dists = issubclass(D.dtype.type, np.integer)
if seed_edge_weight_type=="EXPLICIT":
seed_edge_weight_type = "EUC_2D" if int_dists else "EXACT_2D"
if not points:
raise ValueError("The algorithm requires 2D coordinates for the points")
N = len(D)
if K:
startK = K
maxK = K
else:
# start from the smallest K possible
if C:
startK = int(ceil(sum(d)/C))
elif L:
# find a lower bound by checking how many visits from the TSP
# tour need to add to have any chance of making this L feasible.
_,tsp_f = solve_tsp(D, list(range(1,N)))
shortest_depot_edges = list(D[0,1:])
shortest_depot_edges.sort()
startK = int(ceil(tsp_f/L))
while True:
if tsp_f+sum(shortest_depot_edges[:startK*2])<=startK*L:
break
startK+=1
else:
raise ValueError("If C and L have not been set, K is required")
maxK = N-1
    # We only need the first row of the distance matrix to calculate insertion
# costs for GAP objective function
D_0 = np.copy( D[0,:] )
best_sol = None
best_f = None
best_K = None
seed_trial = 0
incK = 0
maxKinc = max(startK+1, int(startK*INCREASE_K_ON_FAILURE_UPTO))
L_ctr_multipiler = L_MPLR_DEFAULT
if L and use_adaptive_L_constraint_weights:
# Adaptive L constraint multipier
L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT
L_ctr_multipiler_tries = 0
try:
for currentK in range(startK, maxK+1):
found_improving_solution_for_this_K = False
seed_trial=0
while True:
if __debug__:
log(DEBUG, "ITERATION:K=%d, trial=%d, L_ctr_mul=%.6f\n"%
(currentK+incK,seed_trial,L_ctr_multipiler))
log(DEBUG-1, "Getting %d seed points...\n"%(currentK+incK))
# Get seed points
seed_points = seed_f(points, D, d, C, currentK+incK, seed_trial)
if __debug__:
log(DEBUG-1, "...got seed points %s\n"%str(seed_points))
# Extend the distance matrix with seed distances
S = calculate_D(seed_points, points, seed_edge_weight_type)
if st:
# include the "leaving half" of the service_time in the
# distances (the other half is already added to the D
# prior to gapvrp_init)
halftst = int(st/2) if int_dists else st/2.0
S[:,1:] += halftst
D_s = np.vstack( (D_0, S) )
GAP_infeasible = False
L_infeasible = False
solution = [0]
sol_f = 0
solved = False
sol_K = 0
take_next_seed = False
try:
# Distribute the nodes to vehicles using the approxmate
# service costs in D_s and by solving it as GAP
#
#TODO: the model has the same dimensions for all iterations
# with the same K and only the weights differ. Consider
# replacing the coefficient matrix e.g. via C interface
#https://stackoverflow.com/questions/33461329
assignments = _solve_gap(N, D_s, d, C, currentK+incK, L,
L_ctr_multipiler)
if not assignments:
if __debug__:
log(DEBUG, "INFEASIBILITY: GAP infeasible solution")
corrective_action = "try with another seed = %d"%seed_trial
GAP_infeasible = True
else:
if __debug__:
log(DEBUG-1, "Assignments = %s"%str(assignments))
# Due to floating point inaccuracies in L constrained
# cases the feasrelax may be used, which, in turn, can
# in some corner cases return solutions that are not
# really feasible. Make sure it is not the case
if L: served = set([0])
for route_nodes in assignments:
if not route_nodes:
continue
route,route_l = solve_tsp(D, [0]+route_nodes)
# Check for feasibility violations due to feasrelax
if L:
served |= set(route_nodes)
if C and d and totald(route,d)-C_EPS>C:
if __debug__:
log(DEBUG, "INFEASIBILITY: feasRelax "+
"caused GAP infeasible solution "+
" (capacity constraint violation)")
GAP_infeasible = True
break # the route loop
solution += route[1:]
sol_f += route_l
sol_K += 1
if __debug__:
log(DEBUG-2, "DEBUG: Got TSP solution %s (%.2f)"%
(str(route),route_l))
if L and route_l-S_EPS>L:
if __debug__:
log(DEBUG, "INFEASIBILITY: L infeasible solution")
L_infeasible = True
break # break route for loop
# Check for feasibility violations due to feasrelax.
# Have all customers been served?
if not GAP_infeasible and not L_infeasible and\
L and len(served)<len(D):
if __debug__:
log(DEBUG, "INFEASIBILITY: feasRelax caused GAP "+
"infeasible solution (all customers "+
"are not served)")
GAP_infeasible = True
if not GAP_infeasible and not L_infeasible:
if __debug__:
log(DEBUG, "Yielded feasible solution = %s (%.2f)"%(str(solution), sol_f))
solved = True
except GurobiError as grbe:
if __debug__: log(WARNING, str(grbe))
if L and use_adaptive_L_constraint_weights and \
L_ctr_multipiler_tries<L_ADAPTIVE_MPLR_MAX_TRIES:
L_ctr_multipiler+=L_ADAPTIVE_MPLR_INC
L_ctr_multipiler_tries+=1
if __debug__: corrective_action = "Gurobi timeout, try with another L_ctr_multipiler = %.2f"%L_ctr_multipiler
elif increase_K_on_failure and currentK+incK+1<=maxKinc:
if L and use_adaptive_L_constraint_weights and\
L_ctr_multipiler_tries>=L_ADAPTIVE_MPLR_MAX_TRIES:
# try with all multiplier values for larger K
L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT
L_ctr_multipiler_tries = 0
incK+=1
if __debug__: corrective_action = "Gurobi timeout, temporarely increase K by %d"%incK
elif find_optimal_seeds:
take_next_seed = True
else:
grbe.message+=", consider increasing the MAX_MIP_SOLVER_RUNTIME in config.py"
raise grbe
else:
if L and use_adaptive_L_constraint_weights:
## Adaptive GAP/L constraint multiplier reset
# reset multiplier in case it the L feasibility was not violated
# or it has reached the max_value.
if solved or L_ctr_multipiler_tries>=L_ADAPTIVE_MPLR_MAX_TRIES:
L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT
L_ctr_multipiler_tries = 0
take_next_seed = True
if not solved and increase_K_on_failure and currentK+incK+1<=maxKinc:
incK+=1
take_next_seed = False
if __debug__: corrective_action = "temporarely increase K by %d"%incK
else:
if __debug__: corrective_action = "try with another seed = %d"%seed_trial
## Adaptive GAP/L constraint multiplier update
else:
L_ctr_multipiler+=L_ADAPTIVE_MPLR_INC
L_ctr_multipiler_tries+=1
if __debug__: corrective_action = "try with another L_ctr_multipiler = %.2f"%L_ctr_multipiler
else:
if not solved and increase_K_on_failure and currentK+incK+1<=maxKinc:
incK+=1
if __debug__: corrective_action = "temporarely increase K by %d"%incK
else:
take_next_seed = True
# Store the best so far
if solved:
if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
best_sol = solution
best_f = sol_f
best_K = sol_K
found_improving_solution_for_this_K = True
else:
# No feasible solution was found for this trial (max route cost
# or capacity constraint was violated).
if __debug__:
if GAP_infeasible or L_infeasible:
log(DEBUG, "Constraint is violated, "+corrective_action)
else:
log(DEBUG, "Continuing search, "+corrective_action)
if take_next_seed:
incK = 0
seed_trial+=1
if not find_optimal_seeds:
break # seed loop, possibly try next K
if seed_trial==N:
incK = 0
break # seed loop, possibly try next K
if minimize_K:
# do not try different K if we found a solution
if best_sol:
break # K loop
else: # not minimize_K
# We already have an feasible solution for K<K_current, and could
# not find a better solution than that on K_current. Therefore, it
# is improbable we will find one even if we increase K and we
# should stop here.
if best_sol and not found_improving_solution_for_this_K:
break
except KeyboardInterrupt: #or SIGINT
# pass on the current best_sol
raise KeyboardInterrupt(best_sol)
return best_sol
|
110ee76668283efdf91716c3235de4b5719b81f3
| 19,780 |
def data_block(block_str):
""" Parses all of the NASA polynomials in the species block of the
mechanism file and subsequently pulls all of the species names
and thermochemical properties.
:param block_str: string for thermo block
:type block_str: str
:return data_block: all the data from the data string for each species
:rtype: list(list(str/float))
"""
thm_dstr_lst = data_strings(block_str)
thm_dat_lst = tuple(zip(
map(species_name, thm_dstr_lst),
map(temperatures, thm_dstr_lst),
map(low_coefficients, thm_dstr_lst),
map(high_coefficients, thm_dstr_lst)))
return thm_dat_lst
|
bfcf457e164002cd4ab8c6c852117ebe24f437ab
| 19,781 |
from functools import reduce
from re import S
def risch_norman(f, x, rewrite=False):
"""Computes indefinite integral using extended Risch-Norman algorithm,
also known as parallel Risch. This is a simplified version of full
recursive Risch algorithm. It is designed for integrating various
classes of functions including transcendental elementary or special
functions like Airy, Bessel, Whittaker and Lambert.
The main difference between this algorithm and the recursive one
is that rather than computing a tower of differential extensions
in a recursive way, it handles all cases in one shot. That's why
it is called parallel Risch algorithm. This makes it much faster
than the original approach.
Another benefit is that it doesn't require to rewrite expressions
in terms of complex exponentials. Rather it uses tangents and so
    antiderivatives are being found in a more familiar form.
Risch-Norman algorithm can also handle special functions very
easily without any additional effort. Just differentiation
method must be known for a given function.
Note that this algorithm is not a decision procedure. If it
computes an antiderivative for a given integral then it's a
proof that such function exists. However when it fails then
there still may exist an antiderivative and a fallback to
    recursive Risch algorithm would be necessary.
The question if this algorithm can be made a full featured
decision procedure still remains open.
For more information on the implemented algorithm refer to:
[1] K. Geddes, L.Stefanus, On the Risch-Norman Integration
Method and its Implementation in Maple, Proceedings of
ISSAC'89, ACM Press, 212-217.
[2] J. H. Davenport, On the Parallel Risch Algorithm (I),
Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
[3] J. H. Davenport, On the Parallel Risch Algorithm (III):
Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
[4] J. H. Davenport, B. M. Trager, On the Parallel Risch
Algorithm (II), ACM Transactions on Mathematical
Software 11 (1985), 356-362.
"""
f = Basic.sympify(f)
if not f.has(x):
return f * x
rewritables = {
(sin, cos, cot) : tan,
(sinh, cosh, coth) : tanh,
}
if rewrite:
for candidates, rule in rewritables.iteritems():
f = f.rewrite(candidates, rule)
else:
for candidates in rewritables.iterkeys():
if f.has(*candidates):
break
else:
rewrite = True
terms = components(f)
for g in set(terms):
h = g.diff(x)
if not isinstance(h, Basic.Zero):
terms |= components(h)
terms = [ g for g in terms if g.has(x) ]
V, in_terms, out_terms = [], [], {}
for i, term in enumerate(terms):
V += [ Symbol('x%s' % i) ]
N = term.count_ops(symbolic=False)
in_terms += [ (N, term, V[-1]) ]
out_terms[V[-1]] = term
in_terms.sort(lambda u, v: int(v[0] - u[0]))
def substitute(expr):
for _, g, symbol in in_terms:
expr = expr.subs(g, symbol)
return expr
diffs = [ substitute(g.diff(x)) for g in terms ]
denoms = [ g.as_numer_denom()[1] for g in diffs ]
denom = reduce(lambda p, q: lcm(p, q, V), denoms)
numers = [ normal(denom * g, *V) for g in diffs ]
def derivation(h):
return Basic.Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
def deflation(p):
for y in p.atoms(Basic.Symbol):
if not isinstance(derivation(p), Basic.Zero):
c, q = p.as_polynomial(y).as_primitive()
return deflation(c) * gcd(q, q.diff(y))
else:
return p
def splitter(p):
for y in p.atoms(Basic.Symbol):
if not isinstance(derivation(y), Basic.Zero):
c, q = p.as_polynomial(y).as_primitive()
q = q.as_basic()
h = gcd(q, derivation(q), y)
s = quo(h, gcd(q, q.diff(y), y), y)
c_split = splitter(c)
if s.as_polynomial(y).degree() == 0:
return (c_split[0], q * c_split[1])
q_split = splitter(normal(q / s, *V))
return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
else:
return (S.One, p)
special = []
for term in terms:
if isinstance(term, Basic.Function):
if isinstance(term, Basic.tan):
special += [ (1 + substitute(term)**2, False) ]
elif isinstance(term.func, tanh):
special += [ (1 + substitute(term), False),
(1 - substitute(term), False) ]
#elif isinstance(term.func, Basic.LambertW):
# special += [ (substitute(term), True) ]
ff = substitute(f)
P, Q = ff.as_numer_denom()
u_split = splitter(denom)
v_split = splitter(Q)
s = u_split[0] * Basic.Mul(*[ g for g, a in special if a ])
a, b, c = [ p.as_polynomial(*V).degree() for p in [s, P, Q] ]
candidate_denom = s * v_split[0] * deflation(v_split[1])
monoms = monomials(V, 1 + a + max(b, c))
linear = False
while True:
coeffs, candidate, factors = [], S.Zero, set()
for i, monomial in enumerate(monoms):
coeffs += [ Symbol('A%s' % i, dummy=True) ]
candidate += coeffs[-1] * monomial
candidate /= candidate_denom
polys = [ v_split[0], v_split[1], u_split[0]] + [ s[0] for s in special ]
for irreducibles in [ factorization(p, linear) for p in polys ]:
factors |= irreducibles
for i, irreducible in enumerate(factors):
if not isinstance(irreducible, Basic.Number):
coeffs += [ Symbol('B%s' % i, dummy=True) ]
candidate += coeffs[-1] * Basic.log(irreducible)
h = together(ff - derivation(candidate) / denom)
numerator = h.as_numer_denom()[0].expand()
if not isinstance(numerator, Basic.Add):
numerator = [numerator]
collected = {}
for term in numerator:
coeff, depend = term.as_independent(*V)
if depend in collected:
collected[depend] += coeff
else:
collected[depend] = coeff
solutions = solve(collected.values(), coeffs)
if solutions is None:
if linear:
break
else:
linear = True
else:
break
if solutions is not None:
antideriv = candidate.subs_dict(solutions)
for C in coeffs:
if C not in solutions:
antideriv = antideriv.subs(C, S.Zero)
antideriv = simplify(antideriv.subs_dict(out_terms)).expand()
if isinstance(antideriv, Basic.Add):
return Basic.Add(*antideriv.as_coeff_factors()[1])
else:
return antideriv
else:
if not rewrite:
return risch_norman(f, x, rewrite=True)
else:
return None
|
12dd2cbd724566344d73bff48ed46b33d2b84730
| 19,782 |
from .model_store import download_model
import os
def get_vgg(blocks,
bias=True,
use_bn=False,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create VGG model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bias : bool, default True
Whether the convolution layer uses a bias vector.
use_bn : bool, default False
Whether to use BatchNorm layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 11:
layers = [1, 1, 2, 2, 2]
elif blocks == 13:
layers = [2, 2, 2, 2, 2]
elif blocks == 16:
layers = [2, 2, 3, 3, 3]
elif blocks == 19:
layers = [2, 2, 4, 4, 4]
else:
raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
channels_per_layers = [64, 128, 256, 512, 512]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = VGG(
channels=channels,
bias=bias,
use_bn=use_bn,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
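# Usage sketch (illustrative only; relies on the VGG class defined elsewhere in
# this module): build an untrained VGG-16 with batch normalization.
if __name__ == "__main__":
    net = get_vgg(blocks=16, use_bn=True, pretrained=False)
    print(net)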
|
ac7cf08fd3faf386edd896672519dbd96e82953f
| 19,783 |
import os
import logging
def configure_logger():
"""
Declare and validate existence of log directory; create and configure logger object
:return: instance of configured logger object
"""
log_dir = os.path.join(os.getcwd(), 'log')
create_directory_if_not_exists(None, log_dir)
configure_logging(log_dir)
logger = logging.getLogger('importer_logger')
return logger
|
301fd676a3a680a08b14a16ea4dcc6c41fb2af9b
| 19,784 |
def preprocess_spectra(fluxes, interpolated_sn, sn_array, y_offset_array):
"""preprocesses a batch of spectra, adding noise according to specified sn profile, and applies continuum error
INPUTS
fluxes: length n 2D array with flux values for a spectrum
interpolated_sn: length n 1D array with relative sn values for each pixel
sn_array: 2d array dims (num examples, 1) with sn selected for each example
y_offset_array: same as sn array but with y_offsets
OUTPUTS
fluxes: length n 2D array with preprocessed fluxes for a spectrum
"""
n_pixels = np.size(fluxes[0, :])
n_stars = np.size(fluxes[:, 1])
base_stddev = 1.0 / sn_array[:, 0]
for i in range(n_stars):
noise_array = np.random.normal(0.0, scale=base_stddev[i], size=n_pixels)
fluxes[i, :] += noise_array*interpolated_sn
fluxes += y_offset_array
return fluxes
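# Usage sketch with made-up shapes (illustrative only): 4 spectra of 100 pixels,
# a flat relative S/N profile, per-example S/N of 50 and small continuum offsets.
if __name__ == "__main__":
    n_stars, n_pixels = 4, 100
    fluxes = np.ones((n_stars, n_pixels))
    interpolated_sn = np.ones(n_pixels)
    sn_array = np.full((n_stars, 1), 50.0)
    y_offset_array = np.random.normal(0.0, 0.01, size=(n_stars, 1))
    noisy = preprocess_spectra(fluxes, interpolated_sn, sn_array, y_offset_array)
    print(noisy.shape)  # (4, 100)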
|
552d42b3835f3bc60930ae6f05f1544c924e940b
| 19,785 |
import json
def read_config(path=None):
"""
Function for reading in the config.json file
"""
#create the filepath
if path:
if "config.json" in path:
file_path = path
else:
file_path = f"{path}/config.json"
else:
file_path = "config.json"
#load in config
try:
with open(file_path, "r") as json_file:
config = json.load(json_file)
except Exception:
raise Exception("Your config file is corrupt (wrong syntax, missing values, ...)")
return config
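# Usage sketch (illustrative, hypothetical paths): load config.json from the
# working directory or from an explicit directory.
if __name__ == "__main__":
    config = read_config()                # ./config.json
    # config = read_config("/etc/myapp")  # /etc/myapp/config.json
    print(config)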
|
3e3612879645509acb74f184085f7e584afbf822
| 19,786 |
import json
def ema_incentive(ds):
"""
Parse stream name 'incentive--org.md2k.ema_scheduler--phone'. Convert json column to multiple columns.
Args:
ds: Windowed/grouped DataStream object
Returns:
ds: Windowed/grouped DataStream object.
"""
schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("incentive", FloatType()),
StructField("total_incentive", FloatType()),
StructField("ema_id", StringType()),
StructField("data_quality", FloatType())
])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def parse_ema_incentive(user_data):
all_vals = []
for index, row in user_data.iterrows():
ema = row["incentive"]
if not isinstance(ema, dict):
ema = json.loads(ema)
incentive = ema["incentive"]
total_incentive = ema["totalIncentive"]
ema_id = ema["emaId"]
data_quality = ema["dataQuality"]
all_vals.append([row["timestamp"],row["localtime"], row["user"],1,incentive,total_incentive,ema_id,data_quality])
return pd.DataFrame(all_vals,columns=['timestamp','localtime', 'user', 'version','incentive','total_incentive','ema_id','data_quality'])
# check if datastream object contains grouped type of DataFrame
if not isinstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.apply(parse_ema_incentive)
return DataStream(data=data, metadata=Metadata())
|
ad6d6a08906dc5aab5a1ea2d0895eb84eac44f44
| 19,787 |
def read_fingerprint(finger_name: str) -> np.ndarray:
"""
Given the file "x_y_z" name this function returns a vector with
the fingerprint data.
:param finger_name: A string with the format "x_y_z".
:return: A vector (1x256) containing the fingerprint data.
"""
base_path = "rawData/QFM16_"
path = base_path + finger_name + ".txt"
return read_finger_file(path)
|
21b88afffdb016699ad6a0ed635931096ebe8bc1
| 19,788 |
import matplotlib.pyplot as plt
def plot_tree(T, res=None, title=None, cmap_id="Pastel2"):
"""Plots a given tree, containing hierarchical segmentation.
Parameters
----------
T: mir_eval.segment.tree
A tree object containing the hierarchical segmentation.
res: float
Frame-rate resolution of the tree (None to use seconds).
title: str
Title for the plot. `None` for no title.
cmap_id: str
Color Map ID
"""
def round_time(t, res=0.1):
v = int(t / float(res)) * res
return v
# Get color map
cmap = plt.get_cmap(cmap_id)
# Get segments by level
level_bounds = []
for level in T.levels:
if level == "root":
continue
segments = T.get_segments_in_level(level)
level_bounds.append(segments)
# Plot axvspans for each segment
B = float(len(level_bounds))
#plt.figure(figsize=figsize)
for i, segments in enumerate(level_bounds):
labels = utils.segment_labels_to_floats(segments)
for segment, label in zip(segments, labels):
#print i, label, cmap(label)
if res is None:
start = segment.start
end = segment.end
xlabel = "Time (seconds)"
else:
start = int(round_time(segment.start, res=res) / res)
end = int(round_time(segment.end, res=res) / res)
xlabel = "Time (frames)"
plt.axvspan(start, end,
ymax=(len(level_bounds) - i) / B,
ymin=(len(level_bounds) - i - 1) / B,
facecolor=cmap(label))
# Plot labels
L = float(len(T.levels) - 1)
    plt.yticks(np.linspace(0, (L - 1) / L, num=int(L)) + 1 / L / 2.,
               T.levels[1:][::-1])
plt.xlabel(xlabel)
if title is not None:
plt.title(title)
plt.gca().set_xlim([0, end])
|
40baf9a5f62ee139ddc905e840bc982b67166bb8
| 19,789 |
def read_data(data_path):
"""This function reads in the histogram data from the provided path
and returns a pandas dataframe
"""
histogram_df = None # Your code goes here
return histogram_df
|
5b927246c9298743c22d8a9fc497175aa9600c24
| 19,790 |
def _interpolate_face_to_bar(nodes, eid, eid_new, nid_new, mid, area, J, fbdf,
inid1, inid2, inid3,
xyz1_local, xyz2_local, xyz3_local,
xyz1_global, xyz2_global, xyz3_global,
nodal_result,
local_points, global_points,
geometry, result,
rod_elements, rod_nids, rod_xyzs,
plane_atol, plane_bdf_offset=0.):
"""
These edges have crossings. We rework:
y = m*x + b
into the long form:
y = (y2-y1) / (x2-x1) * (x-x1) + y1
to get:
y = y2 * (x-x1)/(x2-x1) + y1 * (1 - (x-x1)/(x2-x1))
or:
p = (x-x1)/(x2-x1) # percent
y = y2 * p + y1 * (1 - p)
Then we sub the y for the point (3 floats) and sub out x for the
y-coordinate:
percent = (y - y1_local) / (y2_local - y1_local)
avg_xyz = xyz2 * percent + xyz1 * (1 - percent)
Then we just crank the formula where we set the value of "y" to 0.0:
percent = (0. - y1_local) / (y2_local - y1_local)
That's how you do 1 edge, so we do this 3 times. One of the edges
won't be a crossing (the percent is not between 0 and 1.), but 2
edges are. Thus, two points create a line.
We also need to handle the dot case. We're using a triangle
(nodes 1, 2, and 3), so we have 3 vectors:
e0 = e12 = p2 - p1
e1 = e13 = p3 - p1
e2 = e23 = p3 - p2
As metioned previously, only two vectors are used (e.g., e12 and e13).
When combined with the percentage, we find that for a dot, using e12
and e13, node 1 must be a source (both vectors originate from node 1).
Thus the percentages for e12=0. and e13=0. Similarly, node 3 is a
sink (both vectors end at node 3) and node 2 is a corner/mixed (one
vector ends at node 2). In summary:
Node Combination Percentages for Dot
==== =========== ===================
1 e12, e13 0., 0.
2 e12, e23 1., 0.
3 e13, e23 1., 1.
"""
#print('edge =', edge)
#if eid == 11029:
#print('eid=%s inid1=%s, inid2=%s, inid3=%s' % (eid, inid1, inid2, inid3))
#print('nid1=%s, nid2=%s, nid3=%s' % (nodes[inid1], nodes[inid2], nodes[inid3]))
edgesi = (
# (nid_index, xyz in local frame, xyz in global frame
((inid1, xyz1_local, xyz1_global), (inid2, xyz2_local, xyz2_global)), # edge 1-2
((inid2, xyz2_local, xyz2_global), (inid3, xyz3_local, xyz3_global)), # edge 2-3
((inid1, xyz1_local, xyz1_global), (inid3, xyz3_local, xyz3_global)), # edge 1-3
)
nid_a_prime = nid_new
nid_b_prime = nid_new + 1
#projected_points = []
#lengths = []
# we need to prevent dots
msg = ''
results_temp = []
geometry_temp = []
i_values = []
percent_values = []
local_points_temp = []
global_points_temp = []
is_result = nodal_result is not None
for i, (edge1, edge2) in enumerate(edgesi):
(inid_a, p1_local, p1_global) = edge1
(inid_b, p2_local, p2_global) = edge2
#print(' inid_a=%s, p1_local=%s, p1_global=%s' % (inid_a, p1_local, p1_global))
#print(' inid_b=%s, p2_local=%s, p2_global=%s' % (inid_b, p2_local, p2_global))
py1_local = p1_local[1]
py2_local = p2_local[1]
#length = np.linalg.norm(p2_global - p1_global)
#lengths.append(length)
dy = py2_local - py1_local
if np.allclose(dy, 0.0, atol=plane_atol):
# We choose to ignore the triangle edge on/close to the symmetry plane.
# Instead, we use the neighboring projected edges as it's more correct.
# Also, that way do things in a more consistent way.
#
continue
# the second number is on the top
percent = (0. - py1_local) / dy
abs_percent_shifted = abs(percent - 0.5)
#print(' percent = %s' % percent)
#print(' abs_percent_shifted = %s' % abs_percent_shifted)
# catching the case where all edges will intersect with the plane
# if the edges are extended to infinity
#
# a "valid" percent is ranged from [0.-tol, 1.+tol], so:
# b = [0.-tol, 1.+tol] - 0.5 = [-0.5-tol, 0.5+tol] # is the same thing
# in_range = abs(b) < 0.5+tol
#
in_range = abs_percent_shifted < 0.5 + plane_atol
if not in_range:
#print(' **too big...\n')
continue
cut_edgei = [inid_a, inid_b]
cut_edgei.sort()
avg_local = p2_local * percent + p1_local * (1 - percent)
avg_global = p2_global * percent + p1_global * (1 - percent)
#projected_points.append(avg_global)
xl, yl, zl = avg_local
xg, yg, zg = avg_global
local_points_temp.append(avg_local)
global_points_temp.append(avg_global)
#print(' inid1=%s inid2=%s edge1=%s' % (inid1, inid2, str(edge1)))
#print(' xyz1_local=%s xyz2_local=%s' % (xyz1_local, xyz2_local))
#print(' avg_local=%s' % avg_local)
#print(' avg_global=%s' % avg_global)
sid = 1
out_grid = ['GRID', nid_new, None, ] + list(avg_local)
#rod_elements, rod_nids, rod_xyzs
rod_nids.append(nid_new)
rod_xyzs.append(avg_local)
out_grid[4] += plane_bdf_offset
msg += print_card_8(out_grid)
#print(' ', out_grid)
#print(' plane_atol=%s dy=%s\n' % (plane_atol, dy))
if is_result:
result1 = nodal_result[inid_a]
result2 = nodal_result[inid_b]
resulti = result2 * percent + result1 * (1 - percent)
out_temp = ['TEMP', sid, nid_new, resulti] #+ resulti.tolist()
msg += print_card_8(out_temp)
geometry_temp.append([eid, nid_new] + cut_edgei)
# TODO: doesn't handle results of length 2+
results_temp.append([xl, yl, zl, xg, yg, zg, resulti])
else:
geometry_temp.append([eid, nid_new] + cut_edgei)
results_temp.append([xl, yl, zl, xg, yg, zg])
i_values.append(i)
percent_values.append(percent)
nid_new += 1
#p1 = global_points[-2]
#p2 = global_points[-1]
#dxyz = np.linalg.norm(p2 - p1)
if _is_dot(i_values, percent_values, plane_atol):
#print('dot!!!')
mid = 2
return eid_new, nid_new
fbdf.write(msg)
local_points.extend(local_points_temp)
global_points.extend(global_points_temp)
geometry.extend(geometry_temp)
result.extend(results_temp)
#projected_points = np.array(projected_points)
#p1 = projected_points[0, :]
#p2 = projected_points[1, :]
#min_edge_length = min(lengths)
# hack to get rid of dot intersections
#dist = np.linalg.norm(p2 - p1)
#if dist < min_edge_length / 2.:
##print(projected_points)
#print('removing dot...inid1=%s inid2=%s d=%s mel=%s' % (
#inid1, inid2, dist, min_edge_length))
#for unused_i in range(2):
#global_points.pop()
#local_points.pop()
#geometry.pop()
#result.pop()
#return eid_new, nid_new
#print(' cut_edge =', cut_edge)
# if there are 3 nodes in the cut edge, it's fine
# we'll take the first two
conrod = ['CONROD', eid, nid_a_prime, nid_b_prime, mid, area, J]
#print(' ', conrod)
fbdf.write(print_card_8(conrod))
rod_elements.append([eid, nid_a_prime, nid_b_prime])
eid_new += 1
nid_new += 2
return eid_new, nid_new
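
# --- Hedged usage sketch (not part of the original snippet) ---
# A minimal standalone illustration of the edge/plane interpolation used above, with
# made-up endpoint coordinates; only numpy is assumed. The crossing parameter
# percent = (0 - y1) / dy places the interpolated point exactly on the y=0 cutting plane.
import numpy as np

p1_local = np.array([0.0, -1.0, 0.0])   # hypothetical edge endpoint below the plane
p2_local = np.array([2.0, 3.0, 1.0])    # hypothetical edge endpoint above the plane
dy = p2_local[1] - p1_local[1]          # 4.0
percent = (0.0 - p1_local[1]) / dy      # 0.25, inside [0, 1], so the edge really crosses the plane
avg_local = p2_local * percent + p1_local * (1 - percent)
print(avg_local)                        # [0.5  0.   0.25]; the y component is 0, i.e. on the plane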
|
4b35195feb6d41176f0e1daa02c90008a05a75b0
| 19,791 |
def get_number_of_tickets():
"""Get number of tickets to enter from user"""
num_tickets = 0
while num_tickets == 0:
try:
num_tickets = int(input('How many tickets do you want to get?\n'))
        except ValueError:
            print("Invalid entry for number of tickets.")
return num_tickets
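
# --- Hedged usage sketch (not part of the original snippet) ---
# Non-interactive check of the loop above: user input is stubbed with unittest.mock,
# so the invalid first entry is rejected and the second one is returned.
from unittest import mock

with mock.patch('builtins.input', side_effect=['abc', '4']):
    assert get_number_of_tickets() == 4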
|
3703a4ed64867a9884328c09f0fd32e763265e95
| 19,792 |
import numpy as np
def scrape(file):
""" scrapes rankings, counts from agg.txt file"""
D={}
G={}
with open(file,'r') as f:
for line in f:
L = line.split(' ')
qid = L[1][4:]
if qid not in D:
D[qid]=[]
G[qid]=[]
#ground truth
G[qid].append(int(L[0]))
#extract ranks
ranks=[]
for i in range(2,27):
[l,rank]=L[i].split(':')
if rank != 'NULL':
ranks.append(int(rank))
else:
ranks.append(0)
D[qid].append(ranks)
    C = {}
    N = {}
for qid in D:
C[qid]=[]
N[qid] = len(D[qid])
A= np.array(D[qid])
assert A.shape[1] == 25
for i in range(25):
l = A[:,i]
ranked = np.where(l>0)[0]
ranking = ranked[np.argsort(l[ranked])]
C[qid].append(ranking)
#pickle.dump(C,open('MQ-lists.p','wb'))
return C,N,G
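
# --- Hedged usage sketch (not part of the original snippet) ---
# The exact agg.txt layout is inferred from the parsing above (an assumption):
# "<relevance> qid:<id> 1:<rank|NULL> ... 25:<rank|NULL>". A tiny synthetic file is
# written to a temp path just to show the shape of the returned C, N, G.
import os
import tempfile

feats = ' '.join('%d:%d' % (i, (i % 3) + 1) for i in range(1, 26))  # 25 ranker columns, no NULLs
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('2 qid:10 ' + feats + '\n')
    f.write('0 qid:10 ' + feats + '\n')
    path = f.name
C, N, G = scrape(path)
print(N['10'], G['10'])   # 2 documents for query 10, ground-truth labels [2, 0]
os.remove(path)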
|
cad6525a9ae43f8366ae7e0efec14dd8b2921d27
| 19,793 |
import hashlib
import binascii
def private_key_to_WIF(private_key):
"""
Convert the hex private key into Wallet Import Format for easier wallet
importing. This function is only called if a wallet with a balance is
found. Because that event is rare, this function is not significant to the
main pipeline of the program and is not timed.
"""
digest = hashlib.sha256(binascii.unhexlify('80' + private_key)).hexdigest()
var = hashlib.sha256(binascii.unhexlify(digest)).hexdigest()
var = binascii.unhexlify('80' + private_key + var[0:8])
alphabet = chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
value = pad = 0
result = ''
for i, c in enumerate(var[::-1]): value += 256**i * c
while value >= len(alphabet):
div, mod = divmod(value, len(alphabet))
result, value = chars[mod] + result, div
result = chars[value] + result
for c in var:
if c == 0: pad += 1
else: break
return chars[0] * pad + result
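
# --- Hedged sanity check (not part of the original snippet) ---
# Uses the uncompressed-key example commonly cited on the Bitcoin wiki's WIF page;
# the expected string below is quoted from that test vector, not re-derived here.
key = '0C28FCA386C7A227600B2FE50B7CAE11EC86D3BF1FBE471BE89827E19D72AA1D'
print(private_key_to_WIF(key))
# expected per that test vector: 5HueCGU8rMjxEXxiPuD5BDku4MkFqeZyd4dZ1jvhTVqvbTLvyTJ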
|
20e7a767fdfb689f586fc566a94ec37f86a88e52
| 19,794 |
import numpy as np
def woodbury_solve_vec(C, v, p):
    """ Vectorized woodbury solve --- overkill
Computes the matrix vector product (Sigma)^{-1} p
where
            Sigma = CCt + diag(exp(v))
        C = D x r real valued matrix
        v = D dimensional real valued vector (log of the diagonal of A)
        p = D dimensional real valued vector to multiply by
The point of this function is that you never have to explicitly
represent the full DxD matrix to do this multiplication --- hopefully
that will cut down on memory allocations, allow for better scaling
in comments below, we write Sigma = CCt + A, where A = diag(exp(v))
"""
# set up vectorization
if C.ndim == 2:
C = np.expand_dims(C, 0)
assert v.ndim == 1, "v shape mismatched"
assert p.ndim == 1, "p shape mismatched"
v = np.expand_dims(v, 0)
p = np.expand_dims(p, 0)
bsize, D, r = np.shape(C)
    # compute the inverse of the diagonal component
inv_v = np.exp(-v) # A^{-1}
aC = C*inv_v[:, :, None] # A^{-1} C
# low rank, r x r term: (Ir + Ct A^{-1} C)
r_term = np.einsum('ijk,ijh->ikh', C, aC) + \
np.eye(r)
# compute inverse term (broadcasts over first axis)
# (Ir + Ct A^{-1} C)^{-1} (Ct A^{-1})
# in einsum notation:
# - i indexes minibatch (vectorization)
# - r indexes rank dimension
# - d indexes D dimension (obs dimension)
inv_term = np.linalg.solve(r_term, np.swapaxes(aC, 1, 2))
back_term = np.einsum('idr,id->ir', aC, p) # (Ct A^{-1} p)
Sigvs = inv_v*p - np.einsum('ird,ir->id', inv_term, back_term)
return Sigvs
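
# --- Hedged verification sketch (not part of the original snippet) ---
# For small sizes the DxD matrix is cheap to build, so the Woodbury result can be
# compared directly against a dense solve of (C C^T + diag(exp(v))) x = p.
import numpy as np

rng = np.random.RandomState(0)
D, r = 6, 2
C = rng.randn(D, r)
v = rng.randn(D)
p = rng.randn(D)
Sigma = C.dot(C.T) + np.diag(np.exp(v))
expected = np.linalg.solve(Sigma, p)
got = woodbury_solve_vec(C, v, p)[0]   # a batch dimension of size 1 is added internally
assert np.allclose(got, expected)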
|
875ab6709b82cd8865a4396b88cbd10a2847e608
| 19,795 |
import tf_slim as slim  # assumed import; in TF 1.x this module was typically tensorflow.contrib.slim
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
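
# --- Hedged illustration (not part of the original snippet) ---
# With a 1x1 kernel and 'VALID' padding (slim's default), max-pooling at stride=factor
# keeps every factor-th row/column, so the operation reduces to strided slicing.
import numpy as np

x = np.arange(2 * 4 * 4 * 1).reshape(2, 4, 4, 1)   # [batch, height, width, channels]
factor = 2
print(x[:, ::factor, ::factor, :].shape)            # (2, 2, 2, 1)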
|
7e0cbcd5d709405b32ba79c93cbf1ef6e98195f6
| 19,796 |
import pandas as pd
# assumed import paths; Report and _get_cut are local helpers defined elsewhere in this module
from pvfactors.engine import PVEngine
from pvfactors.geometry import OrderedPVArray
from pvfactors.run import run_parallel_engine
def pvfactors_engine_run(data, pvarray_parameters, parallel=0, mode='full'):
"""My wrapper function to launch the pvfactors engine in parallel. It is mostly for Windows use.
In Linux you can directly call run_parallel_engine. It uses MyReportBuilder to generate the output.
Args:
data (pandas DataFrame): The data to fit the model.
pvarray_parameters (dict): The pvfactors dict describing the simulation.
        parallel (int, optional): Number of processes to launch. Defaults to 0 (runs the PVEngine serially).
        mode (str): full or fast depending on the type of back irradiances. See pvfactors doc.
Returns:
pandas DataFrame: The results of the simulation, as desired in MyReportBuilder.
"""
n, row = _get_cut(pvarray_parameters['cut'])
rb = Report(n, row)
if parallel>1:
report = run_parallel_engine(rb, pvarray_parameters, data.index,
data.dni, data.dhi,
data.zenith, data.azimuth,
data.surface_tilt, data.surface_azimuth,
data.albedo, n_processes=parallel)
else:
pvarray = OrderedPVArray.init_from_dict(pvarray_parameters)
engine = PVEngine(pvarray)
engine.fit(data.index,
data.dni,
data.dhi,
data.zenith,
data.azimuth,
data.surface_tilt,
data.surface_azimuth,
data.albedo,
data.ghi)
if mode == 'full': report = engine.run_full_mode(rb.build)
else: report = engine.run_fast_mode(rb.build, pvrow_index=0, segment_index=0)
df_report = pd.DataFrame(report, index=data.index).fillna(0)
return df_report
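
# --- Hedged usage sketch (not part of the original snippet) ---
# The DataFrame columns come from the attribute accesses above; the pvarray_parameters
# keys (other than 'cut', which _get_cut reads) are typical pvfactors fields and are
# assumptions here. The call itself is left commented out because it needs pvfactors
# plus the local Report/_get_cut helpers.
import pandas as pd

data = pd.DataFrame({
    'dni': [800.0], 'dhi': [100.0], 'ghi': [850.0],
    'zenith': [30.0], 'azimuth': [180.0],
    'surface_tilt': [20.0], 'surface_azimuth': [180.0],
    'albedo': [0.2],
}, index=pd.to_datetime(['2021-06-21 12:00']))

pvarray_parameters = {
    'n_pvrows': 3, 'pvrow_height': 1.5, 'pvrow_width': 1.0,
    'axis_azimuth': 0.0, 'gcr': 0.4,
    'cut': {0: {'back': 5}},   # per-row segment discretization consumed by _get_cut
}

# df_report = pvfactors_engine_run(data, pvarray_parameters, parallel=0, mode='full')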
|
1118838fed39e19e31997db9102fdba70283bed8
| 19,797 |
def get_service_button(button_text, service, element="#bottom_right_div"):
""" Generate a button that calls the std_srvs/Empty service when pressed """
print "Adding a service button!"
return str(render.service_button(button_text, service, element))
|
ce8e2a0ec029762c4e19210c9986aef7e78b55d9
| 19,798 |
import sklearn.model_selection as skMS
def create_train_test_set(data, labels, test_size):
"""
Splits dataframe into train/test set
Inputs:
data: encoded dataframe containing encoded name chars
labels: encoded label dataframe
test_size: percentage of input data set to use for test set
Returns:
data_train: Subset of data set for training
data_test : Subset of data set for test
label_train: Subset of label set for training
label_test: Subset of label set for testing
"""
data_train, data_test, label_train, label_test = skMS.train_test_split(data, labels, test_size=test_size)
return [data_train, data_test, label_train, label_test]
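
# --- Hedged usage sketch (not part of the original snippet) ---
# Made-up encoded data; with test_size=0.25, two of the eight rows land in the test split.
import pandas as pd

data = pd.DataFrame({'f1': range(8), 'f2': range(8, 16)})
labels = pd.Series([0, 1] * 4)
data_train, data_test, label_train, label_test = create_train_test_set(data, labels, 0.25)
print(len(data_train), len(data_test))   # 6 2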
|
ffeedf0cf4b7b8b1ffa552f0573d33263d216d99
| 19,799 |