content | sha1 | id
---|---|---|
def _search(self, *query):
"""Search for a match between the query terms and a tensor's Id, Tag, or Description.
https://github.com/OpenMined/PySyft/issues/2609
Note that the query is an AND query meaning that every item in the list of strings (query*)
must be found somewhere on the tensor in order for it to be included in the results.
Args:
query: A list of strings to match against.
me: A reference to the worker calling the search.
Returns:
A list of PointerTensors.
"""
results = list()
for key, obj in self._objects.items():
found_something = True
for query_item in query:
# If deserialization produced a bytes object instead of a string,
# make sure it's turned back to a string for a fair comparison.
if isinstance(query_item, bytes):
query_item = query_item.decode("ascii")
match = False
if query_item == str(key):
match = True
if isinstance(obj, FrameworkTensor):
if obj.tags is not None:
if query_item in obj.tags:
match = True
if obj.description is not None:
if query_item in obj.description:
match = True
if not match:
found_something = False
if found_something:
# set garbage_collect_data to False because if we're searching
# for a tensor we don't own, then it's probably someone else's
# decision when to delete the tensor.
ptr = obj.create_pointer(garbage_collect_data=False, owner=sy.local_worker)
results.append(ptr)
return results | 8ffd9ae2fc0eb9f5f01c9cd3d27123a316bad655 | 13,100 |
def _FindLockNames(locks):
""" Finds the ids and descriptions of locks that given locks can block.
@type locks: dict of locking level to list
@param locks: The locks that gnt-debug delay is holding.
@rtype: dict of string to string
@return: The lock name to entity name map.
For a given set of locks, some internal locks (e.g. ALL_SET locks) can be
blocked even though they were not listed explicitly. This function has to take
care and list all locks that can be blocked by the locks given as parameters.
"""
lock_map = {}
if locking.LEVEL_NODE in locks:
node_locks = locks[locking.LEVEL_NODE]
if node_locks == locking.ALL_SET:
# Empty list retrieves all info
name_uuid_map = _GetNodeUUIDMap([])
else:
name_uuid_map = _GetNodeUUIDMap(node_locks)
for name in name_uuid_map:
lock_map["node/%s" % name_uuid_map[name]] = name
# If ALL_SET was requested explicitly, or there is at least one lock
# Note that locking.ALL_SET is None and hence the strange form of the if
if node_locks == locking.ALL_SET or node_locks:
lock_map["node/[lockset]"] = "joint node lock"
#TODO add other lock types here when support for these is added
return lock_map | 9e98eb03a4a27d8af95b734453a0a2add8248fab | 13,101 |
from time import time
import subprocess
def runtime(command: list, show=True, env=None):
"""Runs the command and returns the runtime."""
print('START:', *command)
t_start = time()
if show:
r = subprocess.run(command, env=env)
else:
r = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
t_end = time()
if r.returncode != 0:
# stderr is only captured when show is False; fall back to the return code otherwise
raise Exception(r.stderr.decode() if r.stderr else 'command failed with return code %d' % r.returncode)
t = t_end - t_start
print('END: runtime =', round(t, 3), 'seconds')
return t | eae28156c04e99fd8c7868f2e6b123d6d6f8ce41 | 13,102 |
def val2str(val):
"""Writes values to a string.
Args:
val (any): Any object that should be represented by a string.
Returns:
valstr (str): String representation of `val`.
"""
# Return the input if it's a string
if isinstance(val,str ): valstr=val
# Handle types where spaces are added
elif isinstance(val,tuple): valstr=repr(val).replace(', ',',')
elif isinstance(val,list ): valstr=repr(val).replace(', ',',')
elif isinstance(val,dict ): valstr=repr(val).replace(', ',',').replace(': ',':')
# Otherwise use repr()
else: valstr=repr(val)
# Return output
return valstr | c8f26553ceeeef841239c534815f86293f91086a | 13,103 |
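A short usage sketch (not from the original source) showing what the replacement rules above produce:

```python
# Illustrative calls; outputs follow from the ', ' / ': ' replacements above.
print(val2str("abc"))             # abc
print(val2str((1, 2, 3)))         # (1,2,3)
print(val2str({"a": 1, "b": 2}))  # {'a':1,'b':2}
print(val2str(3.14))              # 3.14
```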
def showItems(category_name):
"""Pulls all the Categories, the specific Category selected by the user
from the home page, all the items within that specific Category, and
then counts the number of items. All this information is displayed on the
items.html page.
"""
categories = session.query(Category).order_by(asc(Category.name))
category = session.query(Category).filter_by(name=category_name).one()
items = session.query(Item).filter_by(category_name=category_name).all()
itemscount = session.query(Item). \
filter_by(category_name=category_name). \
count()
return render_template('items.html', categories=categories, items=items,
category=category, itemscount=itemscount) | 0ef0c8dfca16a9f16a9d4a46c3d796e817710165 | 13,104 |
import datetime
import math
import calendar
def date_ranges():
"""Build date ranges for current day, month, quarter, and year.
"""
today = datetime.date.today()
quarter = math.floor((today.month - 1) / 3)
cycle = current_cycle()
return {
'month': (
today.replace(day=1),
today.replace(day=calendar.monthrange(today.year, today.month)[1]),
),
'quarter': (
today.replace(day=1, month=quarter * 3 + 1),
today.replace(
day=calendar.monthrange(today.year, quarter * 3 + 3)[1],
month=quarter * 3 + 3,
),
),
'year': (
today.replace(day=1, month=1),
today.replace(
day=calendar.monthrange(today.year, 12)[1],
month=12,
),
),
'cycle': (
datetime.date(
year=cycle - 1,
month=1,
day=1,
),
datetime.date(
year=cycle,
month=12,
day=calendar.monthrange(cycle, 12)[1],
),
),
} | 08feb47fe09d5a0d1c9e5e16bdcbd65d3e211e1e | 13,105 |
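A worked trace of the quarter arithmetic above, using an arbitrary example date (August gives quarter index floor((8-1)/3) = 2, i.e. months 7-9):

```python
import calendar
import datetime
import math

today = datetime.date(2023, 8, 15)                     # example date only
quarter = math.floor((today.month - 1) / 3)            # -> 2
q_start = today.replace(day=1, month=quarter * 3 + 1)  # -> 2023-07-01
q_end = today.replace(
    day=calendar.monthrange(today.year, quarter * 3 + 3)[1],
    month=quarter * 3 + 3,
)                                                      # -> 2023-09-30
print(q_start, q_end)
```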
import numpy as np
import scipy.special as scs
def FiskJohnsonDiscreteFuncBCKWD(r,F0,T):
"""Compute reverse Fourier-Bessel transformation via Fisk Johnson
procedure.
Compute reverse Fourier-Bessel transform (i.e. 0th order reverse Hankel
transform) using a rapidly convergent summation of a Fourier-Bessel
expansion following the method introduced in Ref. [1] and further
detailed in Ref. [2].
Args:
r (numpy array, ndim=1): equispaced 1D grid of target coordinates.
F0 (numpy array, ndim=1): Fourier-Bessel transformed function
at discrete coordinates given by its scaled bessel zeros.
T (float): truncation threshold for objective function.
Returns:
f (numpy array, ndim=1): reverse Fourier-Bessel transform of input
function.
Notes:
- Fisk Johnson procedure for reverse Fourier-Bessel transformation.
- Implements Eq. (10) of Ref. [1].
- above truncation threshold it holds that f(r>T) = 0.
- on input F0 = F0[jm/T] for m = 0...N-1 where jm are the first
N zeros of the 0th order Bessel function in ascending order.
Refs:
[1] An Improved Method for Computing a Discrete Hankel Transform
H. Fisk Johnson
Comp. Phys. Commun. 43 (1987) 181-202
[2] Theory and operational rules for the discrete Hankel transform
N. Baddour, U. Chouinard
J. Opt. Soc. Am. A 32 (2015) 611
"""
# INITIALIZE EMPTY ARRAY FOR REVERSE TRANSFORM
f = np.zeros(r.size)
# COMPUTE FIRST N ZEROS OF 0TH ORDER BESSEL FUNCTION IN ASCENDING ORDER
jm = scs.jn_zeros(0,F0.size)
# REVERSE TRANSFORM YIELDING ARBITRARY FUNCTION VALUES f(xT) FROM ITS
# FOURIER BESSEL TRANSFORM F(j[m]/T) m=0...N-1 AT SCALED BESSEL ZEROS
# j[m]/T. SEE EQ. (10) OF REF. [1].
x = r/T
f[x<1] = 2.0/T**2*np.sum(
F0*scs.j0(jm*x[x<1,np.newaxis])/scs.j1(jm)**2,
axis=1)
return f | f950323bcad980f8b0af94d5848b59cd3522adfc | 13,106 |
import plotly.graph_objs as go
def make_waterfall_horizontal(data, layout):
"""Function used to flip the figure from vertical to horizontal.
"""
h_data = []
for i_trace, trace in enumerate(list(data)):
h_data.append(trace)
prov_x = h_data[i_trace]['x']
h_data[i_trace]['x'] = list(h_data[i_trace]['y'])[::-1]
h_data[i_trace]['y'] = list(prov_x)[::-1]
h_data[i_trace]['orientation'] = 'h'
h_data[i_trace]['hoverinfo'] = hoverinfo_horizontal_(
h_data[i_trace]['hoverinfo'])
h_annotations = []
for i_ann, annotation in enumerate(list(layout['annotations'])):
h_annotations.append(annotation)
prov_x = h_annotations[i_ann]['x']
h_annotations[i_ann]['x'] = h_annotations[i_ann]['y']
h_annotations[i_ann]['y'] = prov_x
h_annotations.reverse()
h_layout = layout
h_layout['annotations'] = h_annotations
h_layout['xaxis'] = go.layout.XAxis({'title': 'Prediction score'})
h_layout['yaxis'] = go.layout.YAxis({'title': ''})
return h_data, h_layout | 0dacdefc4e36d10dea3404e1fc5e92ce6f7326be | 13,107 |
import nmrglue as ng
def parse_file(producer):
"""
Given a producer name, return appropriate parse function.
:param producer: NMR machine producer.
:return: lambda function that reads file according to producer.
"""
global path_to_directory
return {
"Agilent": (lambda: ng.agilent.read(dir=path_to_directory)),
"Bruker": (lambda: ng.bruker.read(dir=path_to_directory)),
"Varian": (lambda: ng.varian.read(dir=path_to_directory)),
}.get(producer) | cdb8e5e6f506b6d393eeefc13e982f246ea527b6 | 13,108 |
from typing import Union
from typing import Optional
import xarray as xr
def get_lon_dim_name_impl(ds: Union[xr.Dataset, xr.DataArray]) -> Optional[str]:
"""
Get the name of the longitude dimension.
:param ds: An xarray Dataset
:return: the name or None
"""
return _get_dim_name(ds, ['lon', 'longitude', 'long']) | 7f063690d8835b7cdd3298e14a5c35bd32025acc | 13,109 |
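The `_get_dim_name` helper is not part of this snippet; a minimal sketch of what such a helper could look like (an assumption, not the project's actual implementation):

```python
from typing import List, Optional, Union
import xarray as xr

def _get_dim_name(ds: Union[xr.Dataset, xr.DataArray],
                  candidates: List[str]) -> Optional[str]:
    # Return the first candidate that is an actual dimension of ds, else None.
    for name in candidates:
        if name in ds.dims:
            return name
    return None
```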
def logout():
"""Log out user."""
session.pop('eventbrite_token', None)
return redirect(url_for('index')) | 449690645fc19d72ef85636776f8d853ca65f4f8 | 13,110 |
def search(query="", casesense=False, filterout=[], subscribers=0, nsfwmode=2, doreturn=False, sort=None):
"""
Search for a subreddit by name
*str query = The search query
"query" = results where "query" is in the name
"*query" = results where "query" is at the end of the name
"query*" = results where "query" is at the beginning of the name
"*query*" = results where "query" is in the middle of the name
bool casesense = is the search case sensitive
list filterout = [list, of, words] to omit from search. Follows casesense
int subscribers = minimum number of subscribers
int nsfwmode =
0 - Clean only
1 - Dirty only
2 - All
int sort = The integer representing the sql column to sort by. Defaults
to no sort.
"""
querys = ''.join([c for c in query if c in GOODCHARS])
queryx = '%%%s%%' % querys
if '!' in query:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys])
return cur.fetchone()
if nsfwmode in [0,1]:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode])
else:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers])
results = []
if casesense is False:
querys = querys.lower()
filterout = [x.lower() for x in filterout]
if '*' in query:
positional = True
front = query[-1] == '*'
back = query[0] == '*'
if front and back:
mid = True
front = False
back = False
else:
mid = False
else:
positional = False
lenq = len(querys)
for item in fetchgenerator(cur):
name = item[SQL_NAME]
if casesense is False:
name = name.lower()
if querys not in name:
#print('%s not in %s' % (querys, name))
continue
if (positional and front) and (name[:lenq] != querys):
#print('%s not front %s (%s)' % (querys, name, name[:lenq]))
continue
if (positional and back) and (name[-lenq:] != querys):
#print('%s not back %s (%s)' % (querys, name, name[-lenq:]))
continue
if (positional and mid) and (querys not in name[1:-1]):
#print('%s not mid %s (%s)' % (querys, name, name[1:-1]))
continue
if any(filters in name for filters in filterout):
#print('%s not filter %s' % (querys, name))
continue
results.append(item)
if sort is not None:
results.sort(key=lambda x: x[sort], reverse=True)
if doreturn is True:
return results
else:
for item in results:
print(item) | c623ee11d507dbd7de84b109c2aa40866bb06dda | 13,111 |
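The wildcard behaviour described in the docstring boils down to prefix/suffix/substring checks; the standalone sketch below (independent of the database code above) mirrors that positional logic:

```python
def matches_wildcard(query: str, name: str) -> bool:
    # "query"   -> substring anywhere
    # "query*"  -> prefix match
    # "*query"  -> suffix match
    # "*query*" -> substring, excluding the first and last character
    stripped = query.strip('*')
    if query.startswith('*') and query.endswith('*'):
        return stripped in name[1:-1]
    if query.endswith('*'):
        return name.startswith(stripped)
    if query.startswith('*'):
        return name.endswith(stripped)
    return stripped in name

print(matches_wildcard('ask*', 'askreddit'))       # True
print(matches_wildcard('*science', 'askscience'))  # True
```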
def is_xh(filename):
"""
Detects if the given file is an XH file.
:param filename: The file to check.
:type filename: str
"""
info = detect_format_version_and_endianness(filename)
if info is False:
return False
return True | f0c33e5eed11522210dbc64a556e77f1c68d63c1 | 13,112 |
import os
try:
import psutil
except ImportError:
psutil = None
def is_parent_process_alive():
"""Return if the parent process is alive. This relies on psutil, but is optional."""
parent_pid = os.getppid()
if psutil is None:
try:
os.kill(parent_pid, 0)
except OSError:
return False
else:
return True
else:
try:
return psutil.pid_exists(parent_pid)
except (AttributeError, KeyboardInterrupt, Exception):
return False | 35ea736de6e5aec32a300c52745ad760723ba314 | 13,113 |
from typing import Tuple
from typing import List
from typing import Union
from typing import Callable
from typing import Any
def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
raise TypeError(no_arg_message)
return columns, func | 81475f1467546f31a63a021c05a0c5f1adfd28a8 | 13,114 |
def mni152_to_fslr(img, fslr_density='32k', method='linear'):
"""
Projects `img` in MNI152 space to fsLR surface
Parameters
----------
img : str or os.PathLike or niimg_like
Image in MNI152 space to be projected
fslr_density : {'32k', '164k'}, optional
Desired output density of fsLR surface. Default: '32k'
method : {'nearest', 'linear'}, optional
Method for projection. Specify 'nearest' if `img` is a label image.
Default: 'linear'
Returns
-------
fsLR : (2,) tuple-of-nib.GiftiImage
Projected `img` on fsLR surface
"""
if fslr_density in ('4k', '8k'):
raise NotImplementedError('Cannot perform registration fusion to '
f'fsLR {fslr_density} space yet.')
return _vol_to_surf(img, 'fsLR', fslr_density, method) | 07129a79bc51e4655516573f517c4270d89800ed | 13,115 |
def parse_record(raw_record, _mode, dtype):
"""Parse CIFAR-10 image and label from a raw record."""
# Convert bytes to a vector of uint8 that is record_bytes long.
record_vector = tf.io.decode_raw(raw_record, tf.uint8)
# The first byte represents the label, which we convert from uint8 to int32
# and then to one-hot.
label = tf.cast(record_vector[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
[_NUM_CHANNELS, _HEIGHT, _WIDTH])
# Convert from [depth, height, width] to [height, width, depth], and cast
# as float32.
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
# normalise images to range 0-1
image = image/255.0
image = tf.cast(image, dtype)
return image, image | 278998f8ee1a126c6c248d8124bba1a4abdf7621 | 13,116 |
def makeSSHTTPClient(paramdict):
"""Creates a SingleShotHTTPClient for the given URL. Needed for Carousel."""
# get the "url" and "postbody" keys from paramdict to use as the arguments of SingleShotHTTPClient
return SingleShotHTTPClient(paramdict.get("url", ""),
paramdict.get("postbody", ""),
extraheaders = paramdict.get("extraheaders", None),
method = paramdict.get('method', None)
) | e7172d849e9c97baf07b9d97b914bf3e05551026 | 13,117 |
import glob
def getFiles(regex, camera, mjdToIngest = None, mjdthreshold = None, days = None, atlasroot='/atlas/', options = None):
"""getFiles.
Args:
regex:
camera:
mjdToIngest:
mjdthreshold:
days:
atlasroot:
options:
"""
# If mjdToIngest is defined, ignore mjdThreshold. If neither
# are defined, grab all the files.
# Don't use find, use glob. It treats the whole argument as a regex.
# e.g. directory = "/atlas/diff/" + camera "/5[0-9][0-9][0-9][0-9]", regex = *.ddc
if mjdToIngest:
if options is not None and options.difflocation is not None:
directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdToIngest))
else:
directory = atlasroot + "diff/" + camera + "/" + str(mjdToIngest)
fileList = glob.glob(directory + '/' + regex)
else:
if mjdthreshold and days:
fileList = []
for day in range(days):
if options is not None and options.difflocation is not None:
directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdthreshold + day))
else:
directory = atlasroot + "diff/" + camera + "/%d" % (mjdthreshold + day)
files = glob.glob(directory + '/' + regex)
if files:
fileList += files
else:
if options is not None and options.difflocation is not None:
directory = options.difflocation.replace('CAMERA', camera).replace('MJD', '/[56][0-9][0-9][0-9][0-9]')
else:
directory = atlasroot + "diff/" + camera + "/[56][0-9][0-9][0-9][0-9]"
fileList = glob.glob(directory + '/' + regex)
fileList.sort()
return fileList | 8d61d2e1900413d55e2cfc590fb6c969dd31b441 | 13,118 |
from typing import Sequence
from typing import Tuple
def chain(*args: GradientTransformation) -> GradientTransformation:
"""Applies a list of chainable update transformations.
Given a sequence of chainable transforms, `chain` returns an `init_fn`
that constructs a `state` by concatenating the states of the individual
transforms, and returns an `update_fn` which chains the update transformations
feeding the appropriate state to each.
Args:
*args: a sequence of chainable (init_fn, update_fn) tuples.
Returns:
A single (init_fn, update_fn) tuple.
"""
init_fns, update_fns = zip(*args)
def init_fn(params: Params) -> Sequence[OptState]:
return [fn(params) for fn in init_fns]
def update_fn(updates: Updates, state: OptState, params: Params = None
) -> Tuple[Updates, Sequence[OptState]]:
new_state = []
for s, fn in zip(state, update_fns): # pytype: disable=wrong-arg-types
updates, new_s = fn(updates, s, params)
new_state.append(new_s)
return updates, new_state
return GradientTransformation(init_fn, update_fn) | 089b30a4daec8be0033567da147be6dc4fab9990 | 13,119 |
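A minimal usage sketch, assuming the snippet's type aliases (`Params`, `Updates`, `OptState`) and the `GradientTransformation` pair are defined before `chain`; the `scale` transform below is written here purely for illustration:

```python
import collections
from typing import Any

Params = Updates = OptState = Any          # assumed aliases
GradientTransformation = collections.namedtuple(
    "GradientTransformation", ["init", "update"])

def scale(factor):
    """Stateless transform that multiplies every update by `factor`."""
    def init_fn(params):
        return ()
    def update_fn(updates, state, params=None):
        return [factor * u for u in updates], state
    return GradientTransformation(init_fn, update_fn)

# Chaining two scalings multiplies the updates by -0.1 overall.
tx = chain(scale(0.1), scale(-1.0))
state = tx.init([0.0])
updates, state = tx.update([1.0, 2.0], state)
print(updates)  # [-0.1, -0.2]
```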
def fibonacci_mult_tuple(fib0=2, fib1=3, count=10):
"""Returns a tuple with a fibonacci sequence using * instead of +."""
return tuple(fibonacci_mult_list(fib0, fib1, count)) | a43d1bd5bd2490ecbf85b305cc99929ac64a4908 | 13,120 |
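`fibonacci_mult_list` is not included in this snippet; a plausible sketch consistent with the docstring ('using * instead of +'), given as an assumption rather than the original helper:

```python
def fibonacci_mult_list(fib0=2, fib1=3, count=10):
    # Fibonacci-like sequence where each term is the product of the
    # previous two terms instead of their sum.
    sequence = [fib0, fib1]
    while len(sequence) < count:
        sequence.append(sequence[-2] * sequence[-1])
    return sequence[:count]

print(fibonacci_mult_list(2, 3, 6))  # [2, 3, 6, 18, 108, 1944]
```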
import logging
from multiprocessing import Process
def execute_in_process(f):
"""
Decorator.
Execute the function in a separate process.
"""
def wrapper(*args, **kwargs):
logging.info("A new process has been launched")
process_f = Process(target=f, args=args, kwargs=kwargs)
process_f.start()
return process_f
return wrapper | 2a002ce48e07ec4b31066c1fad51cd271eaa6230 | 13,121 |
import copy
def castep_spectral_dispersion(computer, calc_doc, seed):
""" Runs a dispersion interpolation on top of a completed SCF calculation,
optionally running orbitals2bands and OptaDOS projected dispersion.
Parameters:
computer (:obj:`matador.compute.ComputeTask`): the object that will be calling CASTEP.
calc_doc (dict): the structure to run on.
seed (str): root filename of structure.
"""
LOG.info('Performing CASTEP spectral dispersion calculation...')
disp_doc = copy.deepcopy(calc_doc)
disp_doc['task'] = 'spectral'
disp_doc['spectral_task'] = 'bandstructure'
# disable checkpointing for BS/DOS by default, leaving just SCF
disp_doc['write_checkpoint'] = 'none'
disp_doc['pdos_calculate_weights'] = True
disp_doc['write_cell_structure'] = True
disp_doc['continuation'] = 'default'
required = []
forbidden = ['spectral_kpoints_mp_spacing']
computer.validate_calc_doc(disp_doc, required, forbidden)
success = computer.run_castep_singleshot(disp_doc, seed, keep=True, intermediate=True)
if disp_doc.get('write_orbitals'):
LOG.info('Planning to call orbitals2bands...')
_cache_executable = copy.deepcopy(computer.executable)
_cache_core = copy.deepcopy(computer.ncores)
computer.ncores = 1
computer.executable = 'orbitals2bands'
try:
success = computer.run_generic(intermediate=True, mv_bad_on_failure=False)
except Exception as exc:
computer.executable = _cache_executable
computer.ncores = _cache_core
LOG.warning('Failed to call orbitals2bands, with error: {}'.format(exc))
computer.ncores = _cache_core
computer.executable = _cache_executable
return success | 0f84e9b4d7a044fd50512093b51ec20425c98cbd | 13,122 |
def return_limit(x):
"""Returns the standardized values of the series"""
dizionario_limite = {'BENZENE': 5,
'NO2': 200,
'O3': 180,
'PM10': 50,
'PM2.5': 25}
return dizionario_limite[x] | 92d40eaef7b47c3a20b9bcf1f7fd72510a05d9b2 | 13,123 |
def npaths(x, y):
"""
Count paths recursively. Memoizing makes this efficient.
"""
if x>0 and y>0:
return npaths(x-1, y) + npaths(x, y-1)
if x>0:
return npaths(x-1, y)
if y>0:
return npaths(x, y-1)
return 1 | 487a1f35b1bf825ffaf6bbf1ed86eb51f6cf18e9 | 13,124 |
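As written, the function recomputes overlapping subproblems; the memoization mentioned in the docstring could be added with `functools.lru_cache`, e.g. (a sketch, not the original code):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def npaths_memo(x, y):
    # Number of monotone lattice paths from (0, 0) to (x, y).
    if x > 0 and y > 0:
        return npaths_memo(x - 1, y) + npaths_memo(x, y - 1)
    if x > 0:
        return npaths_memo(x - 1, y)
    if y > 0:
        return npaths_memo(x, y - 1)
    return 1

print(npaths_memo(20, 20))  # 137846528820, i.e. C(40, 20)
```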
import datetime
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
return repr(obj) | 6342a4fc1b4450181cee5a6287036b1f4ed38883 | 13,125 |
def create_results_dataframe(
list_results,
settings,
result_classes=None,
abbreviate_name=False,
format_number=False,
):
"""
Returns a :class:`pandas.DataFrame`.
If *result_classes* is a list of :class:`Result`, only the columns from
this result classes will be returned. If ``None``, the columns from
all results will be returned.
"""
list_series = []
for results in list_results:
builder = SeriesBuilder(settings, abbreviate_name, format_number)
for result in results:
prefix = result.getname().lower() + " "
if result_classes is None: # Include all results
builder.add_entity(result, prefix)
elif type(result) in result_classes:
if len(result_classes) == 1:
builder.add_entity(result)
else:
builder.add_entity(result, prefix)
list_series.append(builder.build())
return pd.DataFrame(list_series) | 638328936ee9207777fab504021efd83379ec93c | 13,126 |
def get_first_model_each_manufacturer(cars=cars):
"""return a list of matching models (original ordering)"""
first = []
for models in cars.values():
first.append(models[0])
return first | c6ec531ccc7a9bc48b404df34ec9c33066cd8717 | 13,127 |
import numpy as np
def white(*N, mean=0, std=1):
""" White noise.
:param N: Amount of samples.
White noise has a constant power density. It's narrowband spectrum is therefore flat.
The power in white noise will increase by a factor of two for each octave band,
and therefore increases with 3 dB per octave.
"""
return std * np.random.randn(*N) + mean | 874dd75b3cd735f6b5642cd5567d7d0218af615b | 13,128 |
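Brief usage sketch (values illustrative only): draw 8000 white-noise samples and check their sample statistics.

```python
noise = white(8000, mean=0, std=1)
print(noise.shape)                    # (8000,)
print(round(float(noise.mean()), 1))  # ~0.0
print(round(float(noise.std()), 1))   # ~1.0
```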
import random
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
"""Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape
area = w*h
for _ in range(10):
new_area = random.uniform(min_area, 1.0) * area
new_ratio = random.uniform(*ratio)
new_w = int(round((new_area * new_ratio) ** 0.5))
new_h = int(round((new_area / new_ratio) ** 0.5))
if random.uniform(0., 1.) < 0.5:
new_w, new_h = new_h, new_w
if new_w > w or new_h > h:
continue
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
return random_crop(src, size) | 76c64b91e03cb5cf65b164c10771bd78d13945ee | 13,129 |
from tqdm import tqdm
def analyze_subject(subject_id, A, B, spheres, interpolate, mask, data_dir=None):
"""
Parameters
----------
subject_id : int
unique ID of the subject (index of the fMRI data in the input dataset)
A : tuple
tuple of (even_trials, odd_trials) for the first condition (A);
even/odd_trials is the subject's mean fMRI data for that trial, and
should be either a 3D niimg or path (string) to a NIfTI file
B : tuple
tuple of (even_trials, odd_trials) for the second condition (B);
formatted the same as A
spheres : list
TODO
interpolate : bool
whether or not to skip every other sphere and interpolate the results;
used to speed up the analysis
mask : Niimg-like object
boolean image giving location of voxels containing usable signals
data_dir : string
path to directory where MVPA results should be stored
Returns
-------
score_map_fpath : str
path to NIfTI file with values indicating the significance of each voxel
for condition A; same shape as the mask
"""
A_even, A_odd = A
B_even, B_odd = B
if all(isinstance(img, str) for img in [A_even, A_odd, B_even, B_odd]):
A_even, A_odd = load_img(A_even), load_img(A_odd)
B_even, B_odd = load_img(B_even), load_img(B_odd)
A_even, A_odd = get_data(A_even), get_data(A_odd)
B_even, B_odd = get_data(B_even), get_data(B_odd)
_mask = get_data(mask)
scores = np.zeros_like(_mask, dtype=np.float64)
X, y = [], []
for (x0, y0, z0), sphere in tqdm(spheres):
_A_even, _A_odd = A_even[sphere].flatten(), A_odd[sphere].flatten()
_B_even, _B_odd = B_even[sphere].flatten(), B_odd[sphere].flatten()
AA_sim = atanh(np.corrcoef(np.vstack((_A_even, _A_odd)))[0, 1])
BB_sim = atanh(np.corrcoef(np.vstack((_B_even, _B_odd)))[0, 1])
AB_sim = atanh(np.corrcoef(np.vstack((_A_even, _B_odd)))[0, 1])
BA_sim = atanh(np.corrcoef(np.vstack((_B_even, _A_odd)))[0, 1])
scores[x0][y0][z0] = AA_sim + BB_sim - AB_sim - BA_sim
X.append(np.array([x0, y0, z0]))
y.append(scores[x0][y0][z0])
if interpolate:
interp = NearestNDInterpolator(np.vstack(X), y)
for indices in np.transpose(np.nonzero(_mask)):
x, y, z = indices
if not scores[x][y][z]:
scores[x][y][z] = interp(indices)
score_map_fpath = score_map_filename(data_dir, subject_id)
scores = new_img_like(mask, scores)
scores.to_filename(score_map_fpath)
return score_map_fpath | 9596de830bd554990dc286ea08c1ea967deb20a7 | 13,130 |
import re
def joinAges(dataDict):
"""Merges columns by county, dropping ages"""
popColumns = list(dataDict.values())[0].columns.tolist()
popColumns = [re.sub("[^0-9]", "", column) for column in popColumns]
dictOut = dict()
for compartmentName, table in dataDict.items():
table.columns = popColumns
dictOut[compartmentName] = table.sum(axis=1, level=0)
return dictOut | d83ee4883ba58f7090141c131c4e111a4805f15d | 13,131 |
def plot_graph_route(G, route, bbox=None, fig_height=6, fig_width=None,
margin=0.02, bgcolor='w', axis_off=True, show=True,
save=False, close=True, file_format='png', filename='temp',
dpi=300, annotate=False, node_color='#999999',
node_size=15, node_alpha=1, node_edgecolor='none',
node_zorder=1, edge_color='#999999', edge_linewidth=1,
edge_alpha=1, use_geom=True, origin_point=None,
destination_point=None, route_color='r', route_linewidth=4,
route_alpha=0.5, orig_dest_node_alpha=0.5,
orig_dest_node_size=100, orig_dest_node_color='r',
orig_dest_point_color='b'):
"""
Plot a route along a networkx spatial graph.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
dpi : int
the resolution of the image file if saving
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
origin_point : tuple
optional, an origin (lat, lon) point to plot instead of the origin node
destination_point : tuple
optional, a destination (lat, lon) point to plot instead of the
destination node
route_color : string
the color of the route
route_linewidth : int
the width of the route line
route_alpha : float
the opacity of the route line
orig_dest_node_alpha : float
the opacity of the origin and destination nodes
orig_dest_node_size : int
the size of the origin and destination nodes
orig_dest_node_color : string
the color of the origin and destination nodes
orig_dest_point_color : string
the color of the origin and destination points if being plotted instead
of nodes
Returns
-------
fig, ax : tuple
"""
# plot the graph but not the route
fig, ax = plot_graph(G, bbox=bbox, fig_height=fig_height, fig_width=fig_width,
margin=margin, axis_off=axis_off, bgcolor=bgcolor,
show=False, save=False, close=False, filename=filename,
dpi=dpi, annotate=annotate, node_color=node_color,
node_size=node_size, node_alpha=node_alpha,
node_edgecolor=node_edgecolor, node_zorder=node_zorder,
edge_color=edge_color, edge_linewidth=edge_linewidth,
edge_alpha=edge_alpha, use_geom=use_geom)
# the origin and destination nodes are the first and last nodes in the route
origin_node = route[0]
destination_node = route[-1]
if origin_point is None or destination_point is None:
# if caller didn't pass points, use the first and last node in route as
# origin/destination
origin_destination_lats = (G.nodes[origin_node]['y'], G.nodes[destination_node]['y'])
origin_destination_lons = (G.nodes[origin_node]['x'], G.nodes[destination_node]['x'])
else:
# otherwise, use the passed points as origin/destination
origin_destination_lats = (origin_point[0], destination_point[0])
origin_destination_lons = (origin_point[1], destination_point[1])
orig_dest_node_color = orig_dest_point_color
# scatter the origin and destination points
ax.scatter(origin_destination_lons, origin_destination_lats, s=orig_dest_node_size,
c=orig_dest_node_color, alpha=orig_dest_node_alpha, edgecolor=node_edgecolor, zorder=4)
# plot the route lines
edge_nodes = list(zip(route[:-1], route[1:]))
lines = []
for u, v in edge_nodes:
# if there are parallel edges, select the shortest in length
data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])
# if it has a geometry attribute (ie, a list of line segments)
if 'geometry' in data and use_geom:
# add them to the list of lines to plot
xs, ys = data['geometry'].xy
lines.append(list(zip(xs, ys)))
else:
# if it doesn't have a geometry attribute, the edge is a straight
# line from node to node
x1 = G.nodes[u]['x']
y1 = G.nodes[u]['y']
x2 = G.nodes[v]['x']
y2 = G.nodes[v]['y']
line = [(x1, y1), (x2, y2)]
lines.append(line)
# add the lines to the axis as a linecollection
lc = LineCollection(lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3)
ax.add_collection(lc)
# save and show the figure as specified
fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
return fig, ax | 19483338300d2f0fe9426942b5e0a196178cc036 | 13,132 |
from typing import Dict
def random_polynomialvector(
secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], num_coefs: int,
bti: int, btd: int, const_time_flag: bool = True
) -> PolynomialVector:
"""
Generate a random PolynomialVector with bounded Polynomial entries. Essentially just instantiates a PolynomialVector
object with a list of random Polynomial objects as entries, which are in turn generated by random_polynomial
:param secpar: Input security parameter
:type secpar: int
:param lp: Lattice parameters
:type lp: LatticeParameters
:param distribution: String code describing which distribution to use
:type distribution: str
:param dist_pars: Distribution parameters
:type dist_pars: dict
:param num_coefs: Number of coefficients to generate
:type num_coefs: int
:param bti: Number of bits required to unbiasedly sample indices without replacement.
:type bti: int
:param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp
:type btd: int
:param const_time_flag: Indicates whether arithmetic should be constant time.
:type const_time_flag: bool
:return:
:rtype: PolynomialVector
"""
if secpar < 1:
raise ValueError('Cannot random_polynomialvector without an integer security parameter.')
elif distribution == UNIFORM_INFINITY_WEIGHT:
return random_polynomial_vector_inf_wt_unif(
secpar=secpar, lp=lp, dist_pars=dist_pars, num_coefs=num_coefs,
bti=bti, btd=btd, const_time_flag=const_time_flag
)
raise ValueError('Tried to random_polynomialvector with a distribution that is not supported.') | 43d059c69f74f2ba91fec690cc6d9a86ca51cf2a | 13,133 |
def parse_module(file_name, file_reader):
"""Parses a module, returning a module-level IR.
Arguments:
file_name: The name of the module's source file.
file_reader: A callable that returns either:
(file_contents, None) or
(None, list_of_error_detail_strings)
Returns:
(ir, debug_info, errors), where ir is a module-level intermediate
representation (IR), debug_info is a ModuleDebugInfo containing the
tokenization, parse tree, and original source text of all modules, and
errors is a list of tokenization or parse errors. If errors is not an empty
list, ir will be None.
Raises:
FrontEndFailure: An error occurred while reading or parsing the module.
str(error) will give a human-readable error message.
"""
source_code, errors = file_reader(file_name)
if errors:
location = parser_types.make_location((1, 1), (1, 1))
return None, None, [
[error.error(file_name, location, "Unable to read file.")] +
[error.note(file_name, location, e) for e in errors]
]
return parse_module_text(source_code, file_name) | d7c7bc55e3020444473c7b1fd66214a9a2451cb5 | 13,134 |
import cv2
def get_glare_value(gray):
"""
:param gray: cv2.imread(image_path) grayscale image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
:return: numerical value between 0 and 255 which tells the glare value
"""
blur = cv2.blur(gray, (3, 3)) # With kernel size depending upon image size
mean_blur = cv2.mean(blur)
return mean_blur[0] | c019d79f47949a061e74129b56bfb3d413d03314 | 13,135 |
import torch
def makeCrops(image, stepSize, windowSize, true_center):
"""
"""
image = image.type(torch.FloatTensor)
crops = []
truths = []
c_x, c_y, orient = true_center
# TODO: look into otdering, why it's y,x !
margin = 15
# --> is x, but is the column
# to slide horizontally, y must come first
for y in range(0, image.shape[0] - windowSize[0] + 1, stepSize):
for x in range(0, image.shape[1] - windowSize[1] + 1, stepSize):
end_x, end_y = x + windowSize[1], y + windowSize[0]
hasRect = (x + margin < c_x < end_x - margin) and (
y + margin < c_y < end_y - margin
)
truths.append(hasRect)
crops.append(image[y:end_y, x:end_x])
crops = torch.stack(crops)
print("shape of crops", crops.shape)
return crops, truths | 26c813741c799a6f13c0afee3969a6a669064974 | 13,136 |
def generate_n_clusters(object_generator, n_clusters, n_objects_per_cluster, *, rng=None):
""" Creates n_clusters of random objects """
rng = np.random.default_rng(rng)
object_clusters = []
for i in range(n_clusters):
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 16, rng=rng)
object_clusters.append(cluster_objects)
all_objects = [item for sublist in object_clusters for item in sublist]
return all_objects, object_clusters | 1de8c3793abaf635e182b6b4640ddd8bd7d1ed28 | 13,137 |
import numpy as np
import astropy.units as u
def disp2vel(wrange, velscale):
""" Returns a log-rebinned wavelength dispersion with constant velocity.
This code is an adaptation of pPXF's log_rebin routine, simplified to
deal with the wavelength dispersion only.
Parameters
----------
wrange: list, np.array or astropy.Quantity
Input wavelength dispersion range with two elements.
velscale: float or astropy.Quantity
Desired output velocity scale. Units are assumed to be km/s unless
specified as an astropy.Quantity.
"""
c = 299792.458 # Speed of light in km/s
if isinstance(wrange, list):
wrange = np.array(wrange)
wunits = wrange.unit if hasattr(wrange, "unit") else 1
if hasattr(velscale, "unit"):
velscale = velscale.to(u.km/u.s).value
veldiff = np.log(np.max(wrange) / np.min(wrange)) * c
n = veldiff / velscale
m = int(n)
dv = 0.5 * (n-m) * velscale
v = np.arange(0, m * velscale, velscale) + dv
w = wrange[0] * np.exp(v / c)
return w * wunits | c15d5cf8dc3f26969f38e4f678441adeae710e77 | 13,138 |
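Usage sketch (numbers are illustrative): a log-rebinned grid between 4000 and 7000 Å with a 50 km/s step has consecutive wavelengths differing by the constant factor exp(velscale / c).

```python
import numpy as np

wave = disp2vel([4000.0, 7000.0], 50.0)  # plain floats, so km/s is assumed
ratios = wave[1:] / wave[:-1]
print(np.allclose(ratios, np.exp(50.0 / 299792.458)))  # True
```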
import numpy as np
from operator import itemgetter
def relabel(labels):
"""
Remaps integer labels based on who is most frequent
"""
uni_labels, uni_inv, uni_counts = np.unique(
labels, return_inverse=True, return_counts=True
)
sort_inds = np.argsort(uni_counts)[::-1]
new_labels = range(len(uni_labels))
uni_labels_sorted = uni_labels[sort_inds]
relabel_map = dict(zip(uni_labels_sorted, new_labels))
new_labels = np.array(itemgetter(*labels)(relabel_map))
return new_labels | bc809781968387ec9de9de05f8d5cd990ede4c62 | 13,139 |
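A small worked example (illustrative): the most frequent original label becomes 0, the next most frequent becomes 1, and so on.

```python
import numpy as np

labels = np.array([5, 5, 3, 5, 3, 9])
# 5 occurs 3x -> 0, 3 occurs 2x -> 1, 9 occurs 1x -> 2
print(relabel(labels))  # [0 0 1 0 1 2]
```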
from typing import List
from typing import Tuple
def precision_at_threshold(
weighted_actual_names: List[Tuple[str, float, int]],
candidates: np.ndarray,
threshold: float,
distances: bool = False,
) -> float:
"""
Return the precision at a threshold for the given weighted-actuals and candidates
:param weighted_actual_names: list of [name, weight, ?] - weight and ? are ignored
:param candidates: array of [name, score]
:param threshold: threshold
:param distances: if True, score must be <= threshold; if False, score must be >= threshold; defaults to False
"""
matches = _get_matches(candidates, threshold, distances)
num_matches = len(matches)
if num_matches == 0:
return 1.0
return len(set(name for name, weight, _ in weighted_actual_names).intersection(matches)) / num_matches | 40c99830339418acee59c5364f1a70dc5639a475 | 13,140 |
def alias(*alias):
"""Select a (list of) alias(es)."""
valias = [t for t in alias]
return {"alias": valias} | b2ff51f33b601468b1ba4d371bd5abd6d013a188 | 13,141 |
def create_blueprint(request_manager):
"""
Creates an instance of the blueprint.
"""
blueprint = Blueprint('requests', __name__, url_prefix='/requests')
# pylint: disable=unused-variable
@blueprint.route('<request_id>/state')
def get_state(request_id):
"""
Retrieves the state of the specified request.
---
parameters:
- name: request_id
description: id of the request
in: path
type: string
required: true
definitions:
RequestResponse:
description: Object containing the URL of a requests state
type: object
properties:
stateUrl:
description: URL the requests state can be retrieved from
type: string
StateResponse:
description: Object describing request state and result url
type: object
properties:
done:
description: whether the processing of the request is done
type: boolean
resultUrl:
description: URL the requests result can be retrieved from
type: string
responses:
200:
application/json:
schema:
$ref: '#/definitions/StateResponse'
"""
# TODO 404 on invalid request_id or no futures
return jsonify({
'done': request_manager.request_processed(request_id),
'resultUrl':
url_for('requests.get_result',
request_id=request_id,
_external=USE_EXTERNAL_URLS)
})
@blueprint.route('<request_id>/result')
def get_result(request_id):
"""
Retrieves the result of the specified request.
---
parameters:
- name: request_id
description: id of the request
in: path
type: string
required: true
responses:
200:
application/json:
schema:
description: object defined by the type of request
type: object
"""
if not request_manager.request_processed(request_id):
log.info('request "%s" not done or result already retrieved',
request_id)
abort(404)
result = request_manager.get_result(request_id)
log.debug(result)
if not result:
return jsonify({})
return jsonify(result)
return blueprint | f31e386357dba3e890ea6c6acada4ef6f1bee143 | 13,142 |
import pathlib
import traceback
def parse_smyle(file):
"""Parser for CESM2 Seasonal-to-Multiyear Large Ensemble (SMYLE)"""
try:
with xr.open_dataset(file, chunks={}, decode_times=False) as ds:
file = pathlib.Path(file)
parts = file.parts
# Case
case = parts[-6]
# Extract the component from the file string
component = parts[-5]
# Extract the frequency
frequency = parts[-2]
date_regex = r'\d{10}-\d{10}|\d{8}-\d{8}|\d{6}-\d{6}|\d{4}-\d{4}'
date_range = extract_attr_with_regex(parts[-1], date_regex)
# Pull out the start and end time
start_time, end_time = date_range.split('-')
# Extract variable and stream
y = parts[-1].split(date_range)[0].strip('.').split('.')
variable = y[-1]
stream = '.'.join(y[-3:-1])
# Extract init_year, init_month, member_id
z = extract_attr_with_regex(case, r'\d{4}-\d{2}.\d{3}').split('.')
inits = z[0].split('-')
init_year = int(inits[0])
init_month = int(inits[1])
member_id = z[-1]
x = case.split(z[0])[0].strip('.').split('.')
experiment = x[-2]
grid = x[-1]
# Get the long name from dataset
long_name = ds[variable].attrs.get('long_name')
# Grab the units of the variable
units = ds[variable].attrs.get('units')
# Set the default of # of vertical levels to 1
vertical_levels = 1
try:
vertical_levels = ds[ds.cf['vertical'].name].size
except (KeyError, AttributeError, ValueError):
pass
# Use standard region names
regions = {
'atm': 'global',
'ocn': 'global_ocean',
'lnd': 'global_land',
'ice': 'global',
}
spatial_domain = regions.get(component, 'global')
return {
'component': component,
'case': case,
'experiment': experiment,
'variable': variable,
'long_name': long_name.lower(),
'frequency': frequency,
'stream': stream,
'member_id': member_id,
'init_year': init_year,
'init_month': init_month,
'vertical_levels': vertical_levels,
'units': units,
'spatial_domain': spatial_domain,
'grid': grid,
'start_time': parse_date(start_time),
'end_time': parse_date(end_time),
'path': str(file),
}
except Exception:
return {INVALID_ASSET: file, TRACEBACK: traceback.format_exc()} | 791ecf41e4bc1b44ababbf35a021b4a48b46bc24 | 13,143 |
def get_shape(grid, major_ticks=False):
"""
Infer shape from grid
Parameters
----------
grid : ndarray
Minor grid nodes array
major_ticks : bool, default False
If true, infer shape of majr grid nodes
Returns
-------
shape : tuple
Shape of grid ndarray
"""
shape = tuple(len(np.unique(g)) for g in grid.T)
if major_ticks:
shape = tuple(np.max(grid + 1, axis=0).astype(int))
return shape | 57f487260ca19257bd3f9891ce87c52c1eafe3bc | 13,144 |
import numpy as np
def lorentz_force_derivative(t, X, qm, Efield, Bfield):
"""
Useful when using generic integration schemes, such
as RK4, which can be compared to Boris-Bunemann
"""
v = X[3:]
E = Efield(X)
B = Bfield(X)
# Newton-Lorentz acceleration
a = qm*E + qm*np.cross(v,B)
ydot = np.concatenate((v,a))
return ydot | 7a7aade5ece2363e177002bac0c18c4a0b59174f | 13,145 |
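Usage sketch (fields and units are illustrative): integrate the derivative with SciPy's RK45 for a particle in a uniform magnetic field and recover the expected gyromotion.

```python
import numpy as np
from scipy.integrate import solve_ivp

qm = 1.0                                      # charge-to-mass ratio (arbitrary units)
Efield = lambda X: np.zeros(3)                # no electric field
Bfield = lambda X: np.array([0.0, 0.0, 1.0])  # uniform B along z

X0 = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0])  # start at origin, v along x
sol = solve_ivp(lorentz_force_derivative, (0.0, 2 * np.pi), X0,
                args=(qm, Efield, Bfield), rtol=1e-9, atol=1e-12)
# With qm*B = 1 the gyroperiod is 2*pi, so the velocity returns to x-hat.
print(np.round(sol.y[3:, -1], 3))  # approximately [1. 0. 0.]
```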
def copy_rate(source, target, tokenize=False):
"""
Compute copy rate
:param source:
:param target:
:return:
"""
if tokenize:
source = toktok(source)
target = toktok(target)
source_set = set(source)
target_set = set(target)
if len(source_set) == 0 or len(target_set) == 0:
return 0.
return set_overlap(source_set, target_set) | 80b94e90ab43df2f33869660f4b83f41721826f0 | 13,146 |
import json
def read_json_info(fname):
"""
Parse info from the video information file.
Returns: Dictionary containing information on podcast episode.
"""
with open(fname) as fin:
return json.load(fin) | 1eed945ce2917cbca1fb807a807ab57229622374 | 13,147 |
def check_subman_version(required_version):
"""
Verify that the command 'subscription-manager' isn't too old.
"""
status, _ = check_package_version('subscription-manager', required_version)
return status | 33e14fd5cf68e170f5804ae393cb2a45878d19a6 | 13,148 |
import os
def create_output_directory(input_directory):
"""Creates new directory and returns its path"""
output_directory = ''
increment = 0
done_creating_directory = False
while not done_creating_directory:
try:
if input_directory.endswith('/'):
output_directory = input_directory + 'converted'
else:
output_directory = input_directory + '/converted'
if increment != 0:
output_directory += str(increment)
os.makedirs(output_directory, exist_ok=False)
done_creating_directory = True
except FileExistsError:
increment += 1
return output_directory | b2496045e8c9fbd627c32ea40a7b77181b7f4c1d | 13,149 |
import random
def bigsegment_twocolor(rows, cols, seed=None):
"""
Form a map from intersecting line segments.
"""
if seed is not None:
random.seed(seed)
possible_nhseg = [3,5]
possible_nvseg = [1,3,5]
gap_probability = random.random() * 0.10
maxdim = max(rows, cols)
nhseg = 0
nvseg = 0
while (nhseg == 0 and nvseg == 0) or (nhseg % 2 != 0 and nvseg == 0):
nhseg = random.choice(possible_nhseg)
nvseg = random.choice(possible_nvseg)
jitterx = 15
jittery = 15
team1_pattern, team2_pattern = segment_pattern(
rows,
cols,
seed,
colormode="classic",
nhseg=nhseg,
nvseg=nvseg,
jitterx=jitterx,
jittery=jittery,
gap_probability=gap_probability,
)
pattern1_url = pattern2url(team1_pattern)
pattern2_url = pattern2url(team2_pattern)
return pattern1_url, pattern2_url | 1df4861434b19d6bdebe926baad57e3a11f6a64b | 13,150 |
def first_order_smoothness_loss(
image, flow,
edge_weighting_fn):
"""Computes a first-order smoothness loss.
Args:
image: Image used for the edge-aware weighting [batch, height, width, 2].
flow: Flow field for with to compute the smoothness loss [batch, height,
width, 2].
edge_weighting_fn: Function used for the edge-aware weighting.
Returns:
Average first-order smoothness loss.
"""
img_gx, img_gy = image_grads(image)
weights_x = edge_weighting_fn(img_gx)
weights_y = edge_weighting_fn(img_gy)
# Compute second derivatives of the predicted smoothness.
flow_gx, flow_gy = image_grads(flow)
# Compute weighted smoothness
return ((tf.reduce_mean(input_tensor=weights_x * robust_l1(flow_gx)) +
tf.reduce_mean(input_tensor=weights_y * robust_l1(flow_gy))) / 2.) | 92e0eb047bb9d5d67a32c8ba7a601e4b7c0333b8 | 13,151 |
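The helpers `image_grads` and `robust_l1` are not shown in this snippet; the sketches below follow common definitions for this kind of edge-aware smoothness loss and are assumptions, not the original implementations:

```python
import tensorflow as tf

def image_grads(image_batch, stride=1):
    # Forward differences along the height and width axes of a NHWC tensor.
    grad_h = image_batch[:, stride:] - image_batch[:, :-stride]
    grad_w = image_batch[:, :, stride:] - image_batch[:, :, :-stride]
    return grad_h, grad_w

def robust_l1(x):
    # Charbonnier-style robust L1 penalty.
    return (x ** 2 + 0.001 ** 2) ** 0.5
```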
def where_is_my_birthdate_in_powers_of_two(date: int) -> int:
"""
>>> where_is_my_birthdate_in_powers_of_two(160703)
<BLANKLINE>
Dans la suite des
<BLANKLINE>
0 1 3 765
2 , 2 , 2 , …, 2
<BLANKLINE>
Ta date de naissance apparaît ici!:
<BLANKLINE>
…5687200260623819378316070394980560315787
^~~~~~
<BLANKLINE>
À la position #88532
<BLANKLINE>
765
"""
date = str(date)
located = False
sequence = ""
sequence_index = 0
while not located:
sequence_index += 1
sequence += str(2 ** sequence_index)
found_at = sequence.find(date)
if found_at != -1:
print(f"""
Dans la suite des
0 1 3 {sequence_index}
2 , 2 , 2 , …, 2
Ta date de naissance apparaît ici!:
…{numbers_around(sequence, at=found_at, lookaround=20)}
{20 * ' '}^{(len(date)-1) * '~'}
À la position #{sequence.find(date)}
""")
return sequence_index | 656e7c76c849ba4348a5499a1c5cbd02574db011 | 13,152 |
def make_df_health_all(datadir):
"""
Returns full dataframe from health data at specified location
"""
df_health_all = pd.read_csv(str(datadir) + '/health_data_all.csv')
return df_health_all | c9fe0efe65f4455bc9770aa621bed5aaa54fb47f | 13,153 |
def lothars_in_cv2image(image, lothars_encoders,fc):
"""
Given an image opened with OpenCV, find the lothars in the photo
and return the corresponding selfies, names and encodings
"""
# init an empty list for selfie and corresponding name
lothar_selfies=[]
names=[]
encodings=[]
# rgb image
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#convert image to Greyscale for HaarCascade
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cycle angles to until cv find a faces
found=False
angles=rotation_angles(5)
for angle in angles:
r_gray=rotate_image(gray,angle)
faces = fc.detectMultiScale(r_gray,
scaleFactor=1.3,
minNeighbors=6,
minSize=(30, 40),
flags=cv2.CASCADE_SCALE_IMAGE)
# cycle all faces found
for i,face in enumerate(faces):
# define the face rectangle
(x,y,w,h) = face
height, width = image.shape[:2]
extra_h=((1+2*extra)/ratio-1)/2
x=int(max(0,x-w*extra))
y=int(max(0,y-h*extra_h))
w=int(min(w+2*w*extra,width))
h=int(min(h+2*h*extra_h,height))
#print('w/h=',w/h)
# rotate colored image
rotated_image=rotate_image(image,angle)
# Save just the rectangle faces in SubRecFaces (no idea of meaning of 255)
#cv2.rectangle(rotated_image, (x,y), (x+w,y+h), (255,255,255))
sub_face = rotated_image[y:y+h, x:x+w]
index, name, encoding = lothars_in_selfies([sub_face], lothars_encoders,
[x,y,w,h],
num_jitters=2,keep_searching=False)
if (len(name)>0):
lothar_selfies.append(sub_face)
names.append(name)
encodings.append(encoding)
found=True
# break angle changes if a lothar was found
if (found):
break
return lothar_selfies, names, encodings | be869c922299178dd3f4835a6fd547c779c4e11a | 13,154 |
from math import ceil
def approx_nth_prime_upper(n):
""" approximate upper limit for the nth prime number. """
return ceil(1.2 * approx_nth_prime(n)) | 9cfebe3c1dbac176fe917f97664b287b8024c5d4 | 13,155 |
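`approx_nth_prime` is not included here; a common estimate based on the prime number theorem, p_n ≈ n(ln n + ln ln n), would look like the sketch below (an assumption, not the original helper):

```python
from math import ceil, log

def approx_nth_prime(n):
    # Clamp small n so log(log(n)) stays well-defined.
    n = max(n, 6)
    return n * (log(n) + log(log(n)))

print(round(approx_nth_prime(1000)))       # 8840 (the 1000th prime is 7919)
print(ceil(1.2 * approx_nth_prime(1000)))  # 10609
```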
def wavelength_to_velocity(wavelengths, input_units, center_wavelength=None,
center_wavelength_units=None, velocity_units='m/s',
convention='optical'):
"""
Conventions defined here:
http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html
* Radio V = c (c/l0 - c/l)/(c/l0) f(V) = (c/l0) ( 1 - V/c )
* Optical V = c ((c/l0) - f)/f f(V) = (c/l0) ( 1 + V/c )^-1
* Redshift z = ((c/l0) - f)/f f(V) = (c/l0) ( 1 + z )-1
* Relativistic V = c ((c/l0)^2 - f^2)/((c/l0)^2 + f^2) f(V) = (c/l0) { 1 - (V/c)2}1/2/(1+V/c)
"""
if input_units in velocity_dict:
print "Already in velocity units (%s)" % input_units
return wavelengths
if center_wavelength is None:
raise ValueError("Cannot convert wavelength to velocity without specifying a central wavelength.")
if center_wavelength_units not in wavelength_dict:
raise ValueError("Bad wavelength units: %s" % (center_wavelength_units))
if velocity_units not in velocity_dict:
raise ValueError("Bad velocity units: %s" % (velocity_units))
wavelength_m = wavelengths / wavelength_dict['meters'] * wavelength_dict[input_units]
center_wavelength_m = center_wavelength / wavelength_dict['meters'] * wavelength_dict[center_wavelength_units]
frequency_hz = speedoflight_ms / wavelength_m
center_frequency_hz = speedoflight_ms / center_wavelength_m
# the order is very ugly because otherwise, if scalar, the spectroscopic axis attributes won't be inherited
if convention == 'radio':
velocity = ( frequency_hz - center_frequency_hz ) / center_frequency_hz * speedoflight_ms * -1
elif convention == 'optical':
velocity = ( frequency_hz - center_frequency_hz ) / frequency_hz * speedoflight_ms * -1
elif convention == 'relativistic':
velocity = ( frequency_hz**2 - center_frequency_hz**2 ) / ( center_frequency_hz**2 + frequency_hz**2 ) * speedoflight_ms * -1
else:
raise ValueError('Convention "%s" is not allowed.' % (convention))
velocities = velocity * velocity_dict['m/s'] / velocity_dict[velocity_units]
return velocities | ffa1c1bb69a7f6767efcd2f5494da43847a80d3b | 13,156 |
import json
def gen_api_json(api):
"""Apply the api literal object to the template."""
api = json.dumps(
api, cls=Encoder, sort_keys=True, indent=1, separators=(',', ': ')
)
return TEMPLATE_API_DEFINITION % (api) | d9acea9483199746a9c97d905f06b17f20bff18c | 13,157 |
from copy import copy
from bs4 import BeautifulSoup
import re
def get_massage():
"""
Provide extra data massage to solve HTML problems in BeautifulSoup
"""
# Javascript code in ths page generates HTML markup
# that isn't parsed correctly by BeautifulSoup.
# To avoid this problem, all document.write fragments are removed
my_massage = copy(BeautifulSoup.MARKUP_MASSAGE)
my_massage.append((re.compile(u"document.write(.+);"), lambda match: ""))
my_massage.append((re.compile(u'alt=".+">'), lambda match: ">"))
return my_massage | b2a7555b48f4a208545ffb75a4cc36c8c43a1eb7 | 13,158 |
def generate_test_images():
"""Generate all test images.
Returns
-------
results: dict
A dictionary mapping test case name to xarray images.
"""
results = {}
for antialias, aa_descriptor in antialias_options:
for canvas, canvas_descriptor in canvas_options:
for func in (generate_test_001,
generate_test_002,
generate_test_003,
generate_test_004,
generate_test_005,
generate_test_007,
):
points, name = func()
aggregators = draw_lines(canvas, points, antialias)
img = shade(aggregators, cmap=cmap01)
description = "{}_{}_{}".format(
name, aa_descriptor, canvas_descriptor)
results[description] = img
for func in (generate_test_006, ):
points, name = func()
aggregator = draw_multi_segment_line(canvas, points, antialias)
img = shade(aggregator, cmap=cmap01)
description = "{}_{}_{}".format(
name, aa_descriptor, canvas_descriptor)
results[description] = img
return results | d4de85956dfae0cc7d5405b55c21a5063c4dc2c6 | 13,159 |
from typing import Dict
from typing import Type
import copy
def registered_metrics() -> Dict[Text, Type[Metric]]:
"""Returns standard TFMA metrics."""
return copy.copy(_METRIC_OBJECTS) | 0311def576648d6e621d35e6ac89f8cda1302029 | 13,160 |
import os
def get_pcap_path(name):
"""Given a pcap's name in the test directory, returns its full path."""
return os.path.join(PCAPS_DIR, name) | 5c42a96a375202b042c41f546012084665d62371 | 13,161 |
def text_dataset_construction(train_or_test, janossy_k, task, janossy_k2, sequence_len, all_data_size=0):
""" Data Generation """
janossy_k = 1
janossy_k2 = 1
args = parse_args()
task = str(args.task).lower()
X = np.load('../data_'+str(task)+str(sequence_len)+'.npy')
output_X = np.load('../label_'+str(task)+str(sequence_len)+'.npy')
output_X = np.reshape(output_X,(output_X.shape[0],1))
total_len = X.shape[0]
if (all_data_size > 0):
total_len = all_data_size
train_len = int(total_len*0.4)
valid_len = int(total_len*0.2)
NUM_TRAINING_EXAMPLES = train_len
NUM_VALIDATION_EXAMPLES = valid_len
NUM_TEST_EXAMPLES = total_len - train_len - valid_len
#pdb.set_trace()
if train_or_test == 1:
X = X[0:train_len]
output_X = output_X[0:train_len]
num_examples = NUM_TRAINING_EXAMPLES
elif train_or_test == 2:
X = X[train_len:train_len+valid_len]
output_X = output_X[train_len:train_len+valid_len]
num_examples = NUM_VALIDATION_EXAMPLES
elif train_or_test == 0:
X = X[train_len+valid_len:]
output_X = output_X[train_len+valid_len:]
num_examples = NUM_TEST_EXAMPLES
set_numbers = X.shape[1]
train_length = X.shape[0]
if janossy_k == 1 and janossy_k2 == 1:
return X, output_X
else:
X_janossy = janossy_text_input_construction(X, janossy_k,janossy_k2)
return X_janossy, output_X | d55cdade5e2b9bd8a4a2d3ee1e80f0e15f390fc8 | 13,162 |
def neo_vis(task_id):
"""
Args:
task_id:
Returns:
"""
project = get_project_detail(task_id, current_user.id)
return redirect(
url_for(
"main.neovis_page",
port=project["remark"]["port"],
pwd=project["remark"]["password"],
)
) | cb0af50364e857d8febb8771abd0222a6d993b2e | 13,163 |
def getfont(
fontname=None,
fontsize=None,
sysfontname=None,
bold=None,
italic=None,
underline=None):
"""Monkey-patch for ptext.getfont().
This will use our loader and therefore obey our case validation, caching
and so on.
"""
fontname = fontname or ptext.DEFAULT_FONT_NAME
fontsize = fontsize or ptext.DEFAULT_FONT_SIZE
key = (
fontname,
fontsize,
sysfontname,
bold,
italic,
underline
)
if key in ptext._font_cache:
return ptext._font_cache[key]
if fontname is None:
font = ptext._font_cache.get(key)
if font:
return font
font = pygame.font.Font(fontname, fontsize)
else:
font = fonts.load(fontname, fontsize)
if bold is not None:
font.set_bold(bold)
if italic is not None:
font.set_italic(italic)
if underline is not None:
font.set_underline(underline)
ptext._font_cache[key] = font
return font | 04f12244126efd8cf6f274991193a2d71f8797f5 | 13,164 |
def change_box(base_image,box,change_array):
"""
Assumption 1: Contents of box are as follows
[x1, y1, width, height]
"""
height, width, _ = base_image.shape
new_box = [0,0,0,0]
for i,value in enumerate(change_array):
if value != 0:
new_box[i] = box[i] + value
else:
new_box[i] = box[i]
assert new_box[0] >= 0
assert new_box[1] >= 0
assert new_box[0]+new_box[2] <= width
assert new_box[1]+new_box[3] <= height
return new_box | 960b9f2c3ab1b65e9c7a708eac700dfaf65c67ac | 13,165 |
def fetchRepositoryFilter(critic, filter_id):
"""Fetch a RepositoryFilter object with the given filter id"""
assert isinstance(critic, api.critic.Critic)
return api.impl.filters.fetchRepositoryFilter(critic, int(filter_id)) | 76aa247ddf63838ff16131d0d7f1a04092ef3c41 | 13,166 |
import pickle as pk
def load_it(file_path: str, verbose: bool = False) -> object:
"""Loads from the given file path a saved object.
Args:
file_path: String file path (with extension).
verbose: Whether to print info about loading successfully or not.
Returns:
The loaded object.
Raises:
None.
"""
obj = None
with open(file_path, 'rb') as handle:
obj = pk.load(handle)
if verbose:
print('{} is successfully loaded.'.format(file_path))
return obj | 59795488dffbc1a69556b2619e8502cfa23d6d63 | 13,167 |
def connect_contigs(contigs, align_net_file, fill_min, out_dir):
"""Connect contigs across genomes by forming a graph that includes
net format aligning regions and contigs. Compute contig components
as connected components of that graph."""
# construct align net graph and write net BEDs
if align_net_file is None:
graph_contigs_nets = nx.Graph()
else:
graph_contigs_nets = make_net_graph(align_net_file, fill_min, out_dir)
# add contig nodes
for ctg in contigs:
ctg_node = GraphSeq(ctg.genome, False, ctg.chr, ctg.start, ctg.end)
graph_contigs_nets.add_node(ctg_node)
# intersect contigs BED w/ nets BED, adding graph edges.
intersect_contigs_nets(graph_contigs_nets, 0, out_dir)
intersect_contigs_nets(graph_contigs_nets, 1, out_dir)
# find connected components
contig_components = []
for contig_net_component in nx.connected_components(graph_contigs_nets):
# extract only the contigs
cc_contigs = [contig_or_net for contig_or_net in contig_net_component if contig_or_net.net is False]
if cc_contigs:
# add to list
contig_components.append(cc_contigs)
# write summary stats
comp_out = open('%s/contig_components.txt' % out_dir, 'w')
for ctg_comp in contig_components:
ctg_comp0 = [ctg for ctg in ctg_comp if ctg.genome == 0]
ctg_comp1 = [ctg for ctg in ctg_comp if ctg.genome == 1]
ctg_comp0_nt = sum([ctg.end-ctg.start for ctg in ctg_comp0])
ctg_comp1_nt = sum([ctg.end-ctg.start for ctg in ctg_comp1])
ctg_comp_nt = ctg_comp0_nt + ctg_comp1_nt
cols = [len(ctg_comp), len(ctg_comp0), len(ctg_comp1)]
cols += [ctg_comp0_nt, ctg_comp1_nt, ctg_comp_nt]
cols = [str(c) for c in cols]
print('\t'.join(cols), file=comp_out)
comp_out.close()
return contig_components | dc262d7469f524d8b37eebc50787a6e687a1ff90 | 13,168 |
import torch
import time
def train(loader, model, crit, opt, epoch):
"""Training of the CNN.
Args:
loader (torch.utils.data.DataLoader): Data loader
model (nn.Module): CNN
crit (torch.nn): loss
opt (torch.optim.SGD): optimizer for every parameters with True
requires_grad in model except top layer
epoch (int)
"""
batch_time = AverageMeter()
losses = AverageMeter()
data_time = AverageMeter()
forward_time = AverageMeter()
backward_time = AverageMeter()
# switch to train mode
model.train()
# create an optimizer for the last fc layer
optimizer_tl = torch.optim.SGD(
model.top_layer.parameters(),
lr=args.lr,
weight_decay=10 ** args.wd,
)
end = time.time()
print(epoch)
for i, (input_tensor, target) in enumerate(loader):
data_time.update(time.time() - end)
# save checkpoint
n = len(loader) * epoch + i
input_var = torch.autograd.Variable(input_tensor.cuda())
target_var = torch.autograd.Variable(target.cuda())
output = model(input_var)
loss = crit(output, target_var)
        # record loss
        losses.update(loss.item(), input_tensor.size(0))
# compute gradient and do SGD step
opt.zero_grad()
optimizer_tl.zero_grad()
loss.backward()
opt.step()
optimizer_tl.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# sava_params(epoch, model, opt, r'mobilenetv1_30')
if (epoch + 1) / 10 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_10')
if (epoch + 1) / 30 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_30')
if (epoch + 1) / 60 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_60')
if (epoch + 1) / 90 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_90')
if (epoch + 1) / 100 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_100')
return losses.avg | 64a8213d103f57b3305060b42f41c94a3d710759 | 13,169 |
import ostap.logger.table as T
def _h1_cmp_prnt_ ( h1 ,
h2 ,
head1 = '' ,
head2 = '' ,
title = '' ,
density = False ,
max_moment = 10 ,
exp_moment = True ,
prefix = '' ) :
""" Calculate and print some statistic information for two histos
>>> h1 , h2 = ...
>>> h1.cmp_prnt ( h2 )
"""
assert isinstance ( h1 , ROOT.TH1 ) and 1 == h1.dim () , \
"cmp_prnt: invalid type of h1 %s/%s" % ( h1 , type ( h1 ) )
if isinstance ( h2 , ROOT.TH1 ) :
assert 1 == h2.dim () , "cmp_prnt: invalid type of h2 %s/%s" % ( h2 , type ( h2 ) )
if density :
h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1
h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2
cmp = _h1_cmp_prnt_ ( h1_ , h2_ ,
head1 = head1 ,
head2 = head2 ,
title = title ,
density = False ,
prefix = prefix ,
max_moment = max_moment ,
exp_moment = exp_moment )
if h1_ is not h1 : del h1_
if h2_ is not h2 : del h2_
return cmp
if not head1 : head1 = h1.GetName()
if not head2 : head2 = h2.GetName()
fmt = '%+11.4g +- %-10.4g'
wid0 = 25
values = [ 'Mean' ,
'Rms' ,
'Skewness' ,
'Kurtosis' ]
numbers = []
mean = h1.mean () , h2.mean ()
rms = h1.rms () , h2.rms ()
skew = h1.skewness () , h2.skewness ()
kurt = h1.kurtosis () , h2.kurtosis ()
numbers.append ( mean )
numbers.append ( rms )
numbers.append ( skew )
numbers.append ( kurt )
if 4 < max_moment :
for i in range ( 5 , max_moment + 1 ) :
v1 = h1.stdMoment ( i , exp_moment )
v2 = h2.stdMoment ( i , exp_moment )
item = v1 , v2
numbers.append ( item )
if exp_moment : values .append ( 'ExpMom/%d' % i )
else : values .append ( 'StdMom/%d' % i )
numbers = tuple ( numbers )
values = tuple ( values )
wid1 = max ( len ( v ) for v in values )
wid1 = max ( wid1 , len ( 'Quantity' ) )
wid2 = max ( wid0 , len ( head1 ) )
wid3 = max ( wid0 , len ( head2 ) )
wid4 = max ( wid0 , len ( 'Delta' ) )
header = ( ( '{:^%d}' % wid1 ).format ( 'Quantity' ) ,
( '{:^%d}' % wid2 ).format ( head1 ) ,
( '{:^%d}' % wid3 ).format ( head2 ) ,
( '{:^%d}' % wid4 ).format ( 'Delta' ) )
table_data = [ header ]
for v , item in zip ( values , numbers ) :
v1 , v2 = item
dv = v1 - v2
row = allright ( v ) , v1.toString ( fmt ) , v2.toString( fmt ) , dv.toString ( fmt )
table_data.append ( row )
title = title if title else '%s vs %s' % ( head1 , head2 )
return T.table ( table_data , title = title , prefix = prefix ) | 99757ebd016b5a7c9d63b431fc5aee92c08d90e5 | 13,170 |
import numpy as np


def add_scatter(x, scatter, in_place=False):
"""
Add a Gaussian scatter to x.
Parameters
----------
x : array_like
Values to add scatter to.
scatter : float
Standard deviation (sigma) of the Gaussian.
in_place : bool, optional
Whether to add the scatter to x in place or return a
new array.
Returns
-------
x : array_like
x with the added scatter.
"""
if in_place:
x += np.random.randn(*x.shape)*float(scatter)
else:
x = np.asarray(x)
x = x + np.random.randn(*x.shape)*float(scatter)
return x | 27c1423441f7841284201afd873c2c6050812d5f | 13,171 |
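# Quick illustration of add_scatter above: add Gaussian noise with sigma=0.1 to
# a small array. With in_place=False (the default) the input is left untouched
# and a new array is returned.
values = np.linspace(0.0, 1.0, 5)
noisy = add_scatter(values, scatter=0.1)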
def validate_job_state(state):
"""
Validates whether a returned Job State has all the required fields with the right format.
If all is well, returns True,
otherwise this prints out errors to the command line and returns False.
Can be just used with assert in tests, like "assert validate_job_state(state)"
"""
required_fields = {
"job_id": str,
"user": str,
"wsid": int,
"authstrat": str,
"job_input": dict,
"updated": int,
"created": int,
"status": str,
}
optional_fields = {
"estimating": int,
"queued": int,
"running": int,
"finished": int,
"error_code": int,
"terminated_code": int,
"errormsg": str,
}
timestamp_fields = [
"created",
"updated",
"estimating",
"queued",
"running",
"completed",
]
# fields that have to be present based on the context of different statuses
valid_statuses = vars(Status)["_member_names_"]
status_context = {
"estimating": ["estimating"],
"running": ["running"],
"completed": ["completed"],
"error": ["error_code", "errormsg"],
"terminated": ["terminated_code"],
}
# 1. Make sure required fields are present and of the correct type
missing_reqs = list()
wrong_reqs = list()
for req in required_fields.keys():
if req not in state:
missing_reqs.append(req)
elif not isinstance(state[req], required_fields[req]):
wrong_reqs.append(req)
if missing_reqs or wrong_reqs:
print(f"Job state is missing required fields: {missing_reqs}.")
for req in wrong_reqs:
print(
f"Job state has faulty req - {req} should be of type {required_fields[req]}, but had value {state[req]}."
)
return False
# 2. Make sure that context-specific fields are present and the right type
status = state["status"]
if status not in valid_statuses:
print(f"Job state has invalid status {status}.")
return False
if status in status_context:
context_fields = status_context[status]
missing_context = list()
wrong_context = list()
for field in context_fields:
if field not in state:
missing_context.append(field)
elif not isinstance(state[field], optional_fields[field]):
wrong_context.append(field)
if missing_context or wrong_context:
print(f"Job state is missing status context fields: {missing_context}.")
for field in wrong_context:
print(
f"Job state has faulty context field - {field} should be of type {optional_fields[field]}, but had value {state[field]}."
)
return False
# 3. Make sure timestamps are really timestamps
bad_ts = list()
for ts_type in timestamp_fields:
if ts_type in state:
is_second_ts = is_timestamp(state[ts_type])
if not is_second_ts:
print(state[ts_type], "is not a second ts")
is_ms_ts = is_timestamp(state[ts_type] / 1000)
if not is_ms_ts:
print(state[ts_type], "is not a millisecond ts")
if not is_second_ts and not is_ms_ts:
bad_ts.append(ts_type)
if bad_ts:
for ts_type in bad_ts:
print(
f"Job state has a malformatted timestamp: {ts_type} with value {state[ts_type]}"
)
raise MalformedTimestampException()
return True | f108b7a80dee7777931aae994384d47f4a474d67 | 13,172 |
def get_urls(session):
"""
    Function to get all URLs of articles in a table.
    :param session: session establishes all conversations with the database and represents a “holding zone”.
    :type session: sqlalchemy.session
    :returns: a list of URLs of all articles in the table
"""
url = session.query(Article.url)
return [u[0] for u in url] | ad5e4797c1a41c63ef225becaee1a9b8814a3ea2 | 13,173 |
import pkg_resources
def check_min_package_version(package, minimum_version, should_trunc_to_same_len=True):
"""Helper to decide if the package you are using meets minimum version requirement for some feature."""
real_version = pkg_resources.get_distribution(package).version
if should_trunc_to_same_len:
minimum_version = minimum_version[0 : len(real_version)]
logger.debug(
"package %s, version: %s, minimum version to run certain features: %s", package, real_version, minimum_version
)
return real_version >= minimum_version | 62bbed35905bf4aa38fb1596cf164a66eac30594 | 13,174 |
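# Usage sketch for check_min_package_version above. The function expects a
# module-level `logger`, so a basic one is set up here for illustration; note
# that the comparison is a plain string comparison, not a semantic-version one.
import logging
logger = logging.getLogger(__name__)

meets_requirement = check_min_package_version("pip", "20.0")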
import numpy as np


def bboxes_protection(boxes, width, height):
    """
    Clip bounding boxes so that every coordinate stays inside a width x height image.
    :param boxes: array-like of boxes in [x1, y1, x2, y2] format
    :param width: image width in pixels
    :param height: image height in pixels
    :return: the clipped boxes as a numpy array
    """
if not isinstance(boxes, np.ndarray):
boxes = np.asarray(boxes)
if len(boxes) > 0:
boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, width - 1)
boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, height - 1)
return boxes | 8ab0c64788815f6ec66f42c90a4c2debc0627548 | 13,175 |
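# Small sketch for bboxes_protection above: clip [x1, y1, x2, y2] boxes so they
# stay inside a 640x480 frame; out-of-range values are clamped to the edges.
raw_boxes = [[-5, 10, 700, 30], [20, -2, 100, 500]]
clipped = bboxes_protection(raw_boxes, width=640, height=480)
# clipped -> array([[  0,  10, 639,  30], [ 20,   0, 100, 479]])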
def mark_astroids(astroid_map):
"""
    Mark all coordinates in the grid that contain an asteroid (# sign)
"""
astroids = []
for row, _ in enumerate(astroid_map):
for col, _ in enumerate(astroid_map[row]):
if astroid_map[row][col] == "#":
astroid_map[row][col] = ASTROID
astroids.append((row, col))
else:
astroid_map[row][col] = SPACE
return astroids | 36ac179f1cbc040142bea8381c4c85f90c81ecba | 13,176 |
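# Illustrative call for mark_astroids above, assuming the module-level ASTROID
# and SPACE marker constants it references are defined elsewhere in the module.
example_grid = [["#", "."], [".", "#"]]
found = mark_astroids(example_grid)
# found == [(0, 0), (1, 1)]; example_grid now holds ASTROID/SPACE markers in place.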
def process_player_data(
prefix, season=CURRENT_SEASON, gameweek=NEXT_GAMEWEEK, dbsession=session
):
"""
transform the player dataframe, basically giving a list (for each player)
of lists of minutes (for each match, and a list (for each player) of
lists of ["goals","assists","neither"] (for each match)
"""
df = get_player_history_df(
prefix, season=season, gameweek=gameweek, dbsession=dbsession
)
df["neither"] = df["team_goals"] - df["goals"] - df["assists"]
df.loc[(df["neither"] < 0), ["neither", "team_goals", "goals", "assists"]] = [
0.0,
0.0,
0.0,
0.0,
]
alpha = get_empirical_bayes_estimates(df)
y = df.sort_values("player_id")[["goals", "assists", "neither"]].values.reshape(
(
df["player_id"].nunique(),
df.groupby("player_id").count().iloc[0]["player_name"],
3,
)
)
minutes = df.sort_values("player_id")["minutes"].values.reshape(
(
df["player_id"].nunique(),
df.groupby("player_id").count().iloc[0]["player_name"],
)
)
nplayer = df["player_id"].nunique()
nmatch = df.groupby("player_id").count().iloc[0]["player_name"]
player_ids = np.sort(df["player_id"].unique())
return (
dict(
nplayer=nplayer,
nmatch=nmatch,
minutes=minutes.astype("int64"),
y=y.astype("int64"),
alpha=alpha,
),
player_ids,
) | dcbf210242509fa3df1ae5ca35614d802c460381 | 13,177 |
def copy_to_table(_dal, _values, _field_names, _field_types, _table_name, _create_table=None, _drop_existing=None):
"""Copy a matrix of data into a table on the resource, return the table name.
:param _dal: An instance of DAL(qal.dal.DAL)
:param _values: The a list(rows) of lists(values) with values to be inserted
:param _field_names: The name of the fields(columns)
:param _field_types: The field types(qal.sql.types)
:param _table_name: The name of the destination tables
:param _create_table: Create the destination table based on _field_names, _field_types
:param _drop_existing: If a table with the same name as the destination table already exists, drop it
:return: The name of the destination table.
"""
if _drop_existing:
try:
_dal.execute(VerbDropTable(_table_name).as_sql(_dal.db_type))
_dal.commit()
except Exception as e:
print("copy_to_table - Ignoring error when dropping the table \"" + _table_name + "\": " + str(e))
if _create_table:
# Always create temporary table even if it ends up empty.
_create_table_sql = create_table_skeleton(_table_name, _field_names, _field_types).as_sql(_dal.db_type)
print("Creating " + _table_name + " table in "+ str(_dal) +"/" + str(_dal.connection) +", sql:\n" + _create_table_sql)
_dal.execute(_create_table_sql)
_dal.commit()
if len(_values) == 0:
print("copy_to_table: No source data, inserting no rows.")
else:
_insert_sql = make_insert_sql_with_parameters(_table_name, _field_names, _dal.db_type, _field_types)
print("Inserting " + str(len(_values)) + " rows (" + str(len(_values[0])) + " columns)")
_dal.executemany(_insert_sql, _values)
_dal.commit()
return _table_name | 765ae0310811fe64b063c88182726174411960a0 | 13,178 |
import os
def build_file_path(base_dir, base_name, *extensions):
"""Build a path to a file in a given directory.
The file may have an extension(s).
:returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3'
"""
file_name = os.extsep.join([base_name] + list(extensions))
return os.path.expanduser(os.path.join(base_dir, file_name)) | 26e5998c5ba53fd9c99677991ff4cf2e82141f15 | 13,179 |
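# Example for build_file_path above: extensions are joined with os.extsep and
# '~' is expanded by expanduser(), giving e.g. '/home/user/backups/site.tar.gz'.
archive_path = build_file_path("~/backups", "site", "tar", "gz")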
from scipy.optimize import minimize
def Uni(A, b, x=None, maxQ=False, x0=None, tol=1e-12, maxiter=1e3):
"""
    Compute the recognizing functional Uni.
    If maxQ=True, the maximum of the functional is found.
    Parameters:
                A: Interval
                    The matrix of the interval system of linear algebraic equations (ISLAE).
                b: Interval
                    The right-hand side vector of the ISLAE.
    Optional Parameters:
                x: float, array_like
                    The point at which the recognizing functional is evaluated.
                    By default x is an array of zeros.
                maxQ: bool
                    If True, the functional is maximized.
                x0: float, array_like
                    Initial guess for the optimization.
                tol: float
                    Tolerance for stopping the optimization process.
                maxiter: int
                    Maximum number of iterations.
    Returns:
            out: float, tuple
                The value of the recognizing functional at the point x.
                If maxQ=True, a tuple is returned: the first element reports
                whether the optimization finished successfully, the second is
                the optimum point, and the third is the functional value at
                that point.
"""
__uni = lambda x: min(b.rad - (b.mid - A @ x).mig)
__minus_uni = lambda x: -__uni(x)
if maxQ==False:
if x is None:
x = np.zeros(A.shape[1])
return __uni(x)
else:
if x0 is None:
x0 = np.zeros(A.shape[1])+1
maximize = minimize(__minus_uni, x0, method='Nelder-Mead', tol=tol,
options={'maxiter': maxiter})
return maximize.success, maximize.x, -maximize.fun | d81f8f38e4b2f196c79eaaa00c1b604cf119b1bb | 13,180 |
def boqa(alpha, beta, query, items_stat):
"""Implementation of the BOQA algorithm.
Args:
alpha (float): False positive rate.
beta (float): False negative rate.
query (dict): Dict of query terms (standard terms). Key: term name, value: presence value
items_stat (dict): Dictionnary of items statistics. Key: disease, Value: list of items
Returns:
[dict]: Dictionnary of disease and their prediction probability.
"""
hidden = {}
p = {}
a = {}
a_init = 0
# For each disease
for disease in items_stat:
        # We initialize the hidden layer with values from the stats
for term in query:
if term in items_stat[disease]["feature"].keys():
proba = items_stat[disease]["feature"][term]
hidden[term] = np.random.choice([1, 0], p=[proba, 1 - proba])
else:
hidden[term] = 0
# Cardinality calculation of terms between H and Q
m = matrix_m(query, hidden)
a[disease] = (
pow(beta, m[0, 1])
* pow(1 - beta, m[1, 1])
* pow(1 - alpha, m[0, 0])
* pow(alpha, m[1, 0])
)
a_init += a[disease]
for disease in items_stat:
p[disease] = a[disease] / a_init
return p | 37ea565fa05b4a9bceeeb50f31e79394e9534966 | 13,181 |
from operator import inv
def d2c(sys,method='zoh'):
"""Continous to discrete conversion with ZOH method
Call:
sysc=c2d(sys,method='log')
Parameters
----------
sys : System in statespace or Tf form
method: 'zoh' or 'bi'
Returns
-------
sysc: continous system ss or tf
"""
flag = 0
if isinstance(sys, TransferFunction):
sys=tf2ss(sys)
flag=1
a=sys.A
b=sys.B
c=sys.C
d=sys.D
Ts=sys.dt
n=shape(a)[0]
nb=shape(b)[1]
nc=shape(c)[0]
tol=1e-12
if method=='zoh':
if n==1:
if b[0,0]==1:
A=0
B=b/sys.dt
C=c
D=d
else:
tmp1=hstack((a,b))
tmp2=hstack((zeros((nb,n)),eye(nb)))
tmp=vstack((tmp1,tmp2))
s=logm(tmp)
s=s/Ts
if norm(imag(s),ord='inf') > sqrt(sp.finfo(float).eps):
print("Warning: accuracy may be poor")
s=real(s)
A=s[0:n,0:n]
B=s[0:n,n:n+nb]
C=c
D=d
elif method=='foh':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
Id = mat(eye(n))
A = logm(a)/Ts
A = real(around(A,12))
Amat = mat(A)
B = (a-Id)**(-2)*Amat**2*b*Ts
B = real(around(B,12))
Bmat = mat(B)
C = c
D = d - C*(Amat**(-2)/Ts*(a-Id)-Amat**(-1))*Bmat
D = real(around(D,12))
elif method=='bi':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
poles=eigvals(a)
if any(abs(poles-1)<200*sp.finfo(float).eps):
print("d2c: some poles very close to one. May get bad results.")
I=mat(eye(n,n))
tk = 2 / sqrt (Ts)
A = (2/Ts)*(a-I)*inv(a+I)
iab = inv(I+a)*b
B = tk*iab
C = tk*(c*inv(I+a))
D = d- (c*iab)
else:
print("Method not supported")
return
sysc=StateSpace(A,B,C,D)
#print("Teste ", sysc)
if flag==1:
sysc=ss2tf(sysc)
return sysc | 41bb37fcf5b8726b5f20f54f492a568f508725fc | 13,182 |
import ipaddress
def ipv4(value):
"""
Parses the value as an IPv4 address and returns it.
"""
try:
return ipaddress.IPv4Address(value)
except ValueError:
return None | 499918424fe6a94d555379b5fc907367666f1cde | 13,183 |
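# Quick check for ipv4 above: valid addresses come back as IPv4Address objects,
# anything unparsable comes back as None.
assert ipv4("192.168.0.1") == ipaddress.IPv4Address("192.168.0.1")
assert ipv4("not-an-ip") is None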
from typing import Callable
from typing import Mapping
from typing import cast
from typing import Any
def test_from(
fork: str,
) -> Callable[
[Callable[[], StateTest]], Callable[[str], Mapping[str, Fixture]]
]:
"""
Decorator that takes a test generator and fills it for all forks after the
specified fork.
"""
fork = fork.capitalize()
def decorator(
fn: Callable[[], StateTest]
) -> Callable[[str], Mapping[str, Fixture]]:
        def inner(engine: str) -> Mapping[str, Fixture]:
return fill_state_test(fn(), forks_from(fork), engine)
cast(Any, inner).__filler_metadata__ = {
"fork": fork,
"name": fn.__name__.lstrip("test_"),
}
return inner
return decorator | 6c8704978c3ab37bb2ad8434b65359683bd76bbb | 13,184 |
import hashlib
def get_hash_name(feed_id):
"""
    Generate a unique identifier for a user-submitted feed, based on the hash of its feed id.
"""
return hashlib.md5(feed_id.encode('utf8')).hexdigest() | edd1caf943635a091c79831cc6151ecfa840e435 | 13,185 |
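# Example for get_hash_name above: the identifier is the MD5 hex digest of the
# UTF-8 encoded feed id, so the same feed id always maps to the same key.
feed_key = get_hash_name("https://example.com/feed.xml")
assert len(feed_key) == 32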
from typing import TextIO
def _load_transition_probabilities(infile: TextIO) -> tuple[list, int]:
"""
For summary files with new syntax (post 2021-11-24).
Parameters
----------
infile : TextIO
The KSHELL summary file at the starting position of either of
the transition probability sections.
Returns
-------
transitions : list
List of transition data.
negative_spin_counts : int
The number of negative spin levels encountered.
Example
-------
B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4
e^2 fm^4 (W.u.)
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391
"""
negative_spin_counts = 0
transitions = []
for _ in range(2): infile.readline()
for line in infile:
line_split = line.split()
if not line_split: break
spin_initial = float(Fraction(line_split[0]))
parity_initial = _parity_string_to_integer(line_split[1])
idx_initial = int(line_split[2])
Ex_initial = float(line_split[3])
spin_final = float(Fraction(line_split[4]))
parity_final = _parity_string_to_integer(line_split[5])
        idx_final = int(line_split[6])
Ex_final = float(line_split[7])
E_gamma = float(line_split[8])
reduced_transition_prob_decay = float(line_split[9])
reduced_transition_prob_excite = float(line_split[11])
if (spin_final < 0) or (spin_initial < 0):
"""
-1 spin states in the KSHELL data file indicates
bad states which should not be included.
"""
negative_spin_counts += 1 # Debug.
continue
# reduced_transition_prob_decay_list.append([
# 2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
# parity_final, Ex_final, E_gamma, reduced_transition_prob_decay,
# reduced_transition_prob_excite
# ])
transitions.append([
2*spin_initial, parity_initial, idx_initial, Ex_initial,
2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
reduced_transition_prob_decay, reduced_transition_prob_excite
])
return transitions, negative_spin_counts | 9d0b8ebc4ffe78517cb548ae3bbd329b48868589 | 13,186 |
from typing import Dict
from typing import Counter
def merge_lineages(counts: Dict[str, int], min_count: int) -> Dict[str, str]:
"""
Given a dict of lineage counts and a min_count, returns a mapping from all
lineages to merged lineages.
"""
assert isinstance(counts, dict)
assert isinstance(min_count, int)
assert min_count > 0
# Merge rare children into their parents.
counts: Dict[str, int] = Counter({decompress(k): v for k, v in counts.items()})
mapping = {}
for child in sorted(counts, key=lambda k: (-len(k), k)):
if counts[child] < min_count:
parent = get_parent(child)
if parent is None:
continue # at a root
counts[parent] += counts.pop(child)
mapping[child] = parent
# Transitively close.
for old, new in list(mapping.items()):
while new in mapping:
new = mapping[new]
mapping[old] = new
# Recompress.
mapping = {compress(k): compress(v) for k, v in mapping.items()}
return mapping | 3eea022d840e4e61d46041dffb188cbd73ed097b | 13,187 |
def str2bool(s):
"""特定の文字列をbool値にして返す。
s: bool値に変換する文字列(true, false, 1, 0など)。
"""
if isinstance(s, bool):
return s
else:
s = s.lower()
if s == "true":
return True
elif s == "false":
return False
elif s == "1":
return True
elif s == "0":
return False
else:
raise ValueError("%s is incorrect value!" % (s)) | 54b991e234896c0ad684ce5f0f2ccceeada65d8e | 13,188 |
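# Examples for str2bool above: case-insensitive "true"/"false" and "1"/"0" are
# accepted, bools pass through unchanged, and anything else raises ValueError.
assert str2bool("True") is True
assert str2bool("0") is False
assert str2bool(False) is False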
def merge_aoistats(main_AOI_Stat,new_AOI_Stat,total_time,total_numfixations):
"""a helper method that updates the AOI_Stat object of this Scene with a new AOI_Stat object
Args:
main_AOI_Stat: AOI_Stat object of this Scene
new_AOI_Stat: a new AOI_Stat object
total_time:
total_numfixations:
Returns:
        the updated AOI_Stat object
"""
maois = main_AOI_Stat
maois.features['numfixations'] += new_AOI_Stat.features['numfixations']
maois.features['longestfixation'] = max(maois.features['longestfixation'],new_AOI_Stat.features['longestfixation'])
    maois.features['totaltimespent'] += new_AOI_Stat.features['totaltimespent']
maois.features['proportiontime'] = float(maois.features['totaltimespent'])/total_time
maois.features['proportionnum'] = float(maois.features['numfixations'])/total_numfixations
if maois.features['totaltimespent']>0:
maois.features['fixationrate'] = float(maois.features['numfixations'])/maois.features['totaltimespent']
else:
maois.features['fixationrate'] = 0.0
#calculating the transitions to and from this AOI and other active AOIs at the moment
new_AOI_Stat_transition_aois = filter(lambda x: x.startswith(('numtransto_','numtransfrom_')),new_AOI_Stat.features.keys())
if params.DEBUG:
print("segement's transition_aois",new_AOI_Stat_transition_aois)
maois.total_tans_to += new_AOI_Stat.total_tans_to #updating the total number of transition to this AOI
maois.total_tans_from += new_AOI_Stat.total_tans_from #updating the total number of transition from this AOI
for feat in new_AOI_Stat_transition_aois:
if feat in maois.features.copy():
maois.features[feat] += new_AOI_Stat.features[feat]
else:
maois.features[feat] = new_AOI_Stat.features[feat]
# if feat.startswith('numtransto_'):
# sumtransto += maois.features[feat]
# else:
# sumtransfrom += maois.features[feat]
    # updating the proportion transition features based on new transitions to and from this AOI
    maois_transition_aois = list(filter(lambda x: x.startswith(('numtransto_','numtransfrom_')),maois.features.keys())) #all the transition features for this AOI should be updated even if they are not active for this segment
for feat in maois_transition_aois.copy():
if feat.startswith('numtransto_'):
            aid = feat[len('numtransto_'):]
if maois.total_tans_to > 0:
maois.features['proptransto_%s'%(aid)] = float(maois.features[feat]) / maois.total_tans_to
else:
maois.features['proptransto_%s'%(aid)] = 0
else:
            aid = feat[len('numtransfrom_'):]
if maois.total_tans_from > 0:
maois.features['proptransfrom_%s'%(aid)] = float(maois.features[feat]) / maois.total_tans_from
else:
maois.features['proptransfrom_%s'%(aid)] = 0
    ### end of transition calculation
return maois | 8bea60667fa2cc3c187a1fa288c4c3053ba6484e | 13,189 |
import six
from typing import OrderedDict
def catalog_xmatch_circle(catalog, other_catalog,
radius='Association_Radius',
other_radius=Angle(0, 'deg')):
"""Find associations within a circle around each source.
This is convenience function built on `~astropy.coordinates.SkyCoord.search_around_sky`,
extending it in two ways:
1. Each source can have a different association radius.
2. Handle source catalogs (`~astropy.table.Table`) instead of `~astropy.coordinates.SkyCoord`.
Sources are associated if the sum of their radii is smaller than their separation on the sky.
Parameters
----------
catalog : `~astropy.table.Table`
Main source catalog
other_catalog : `~astropy.table.Table`
Other source catalog of potential associations
radius, other_radius : `~astropy.coordinates.Angle` or `str`
Main source catalog association radius.
For `str` this must be a column name (in degrees if without units)
Returns
-------
associations : `~astropy.table.Table`
The list of associations.
"""
if isinstance(radius, six.text_type):
radius = Angle(catalog[radius])
if isinstance(other_radius, six.text_type):
other_radius = Angle(other_catalog[other_radius])
skycoord = skycoord_from_table(catalog)
other_skycoord = skycoord_from_table(other_catalog)
association_catalog_name = other_catalog.meta.get('name', 'N/A')
# Compute associations as list of dict and store in `Table` at the end
associations = []
for source_index in range(len(catalog)):
# TODO: check if this is slower or faster than calling `SkyCoord.search_around_sky` here!?
separation = skycoord[source_index].separation(other_skycoord)
max_separation = radius[source_index] + other_radius
other_indices = np.nonzero(separation < max_separation)[0]
for other_index in other_indices:
association = OrderedDict(
Source_Index=source_index,
Source_Name=catalog['Source_Name'][source_index],
Association_Index=other_index,
Association_Name=other_catalog['Source_Name'][other_index],
Association_Catalog=association_catalog_name,
# There's an issue with scalar `Quantity` objects to init the `Table`
# https://github.com/astropy/astropy/issues/3378
# For now I'll just store the values without unit
Separation=separation[other_index].degree,
)
associations.append(association)
# Need to define columns if there's not a single association
if len(associations) == 0:
log.debug('No associations found.')
table = Table()
table.add_column(Column([], name='Source_Index', dtype=int))
table.add_column(Column([], name='Source_Name', dtype=str))
table.add_column(Column([], name='Association_Index', dtype=int))
table.add_column(Column([], name='Association_Name', dtype=str))
table.add_column(Column([], name='Association_Catalog', dtype=str))
table.add_column(Column([], name='Separation', dtype=float))
else:
log.debug('Found {} associations.'.format(len(associations)))
table = Table(associations, names=associations[0].keys())
return table | 1c56f35d7610c4f2d23e7eb5fd3007329f3dc298 | 13,190 |
def show_map_room(room_id=None):
"""Display a room on a map."""
return get_map_information(room_id=room_id) | 84c1e90ce5b0a210b75f104da429f03dff7b2ca1 | 13,191 |
import os
def color_enabled():
"""Check for whether color output is enabled
If the configuration value ``datalad.ui.color`` is ``'on'`` or ``'off'``,
that takes precedence.
If ``datalad.ui.color`` is ``'auto'``, and the environment variable
``NO_COLOR`` is defined (see https://no-color.org), then color is disabled.
Otherwise, enable colors if a TTY is detected by ``datalad.ui.ui.is_interactive``.
Returns
-------
bool
"""
ui_color = cfg.obtain('datalad.ui.color')
return (ui_color == 'on' or
ui_color == 'auto' and os.getenv('NO_COLOR') is None and ui.is_interactive) | 5db8ac3de0fa8b3e452d82244208987c8816cfb4 | 13,192 |
def parse_line_regex(line):
"""Parse raw data line into list of floats using regex.
This regex approach works, but is very slow!! It also requires two helper functions to clean up
malformed data written by ls-dyna (done on purpose, probably to save space).
Args:
line (str): raw data line from nodout
Returns:
raw_data (list of floats): [nodeID, xdisp, ydisp, zdisp]
"""
try:
raw_data = line.split()
raw_data = [float(x) for x in raw_data]
except ValueError:
line = correct_neg(line)
line = correct_Enot(line)
raw_data = line.split()
raw_data = [float(x) for x in raw_data[0:4]]
return raw_data | b8b18a8d47f5f1c9a682ca86668bf311282b0439 | 13,193 |
def create_page_metadata(image_dir,
image_dir_path,
font_files,
text_dataset,
speech_bubble_files,
speech_bubble_tags):
"""
This function creates page metadata for a single page. It includes
transforms, background addition, random panel removal,
panel shrinking, and the populating of panels with
images and speech bubbles.
:param image_dir: List of images to pick from
:type image_dir: list
:param image_dir_path: Path of images dir to add to
panels
:type image_dir_path: str
:param font_files: list of font files for speech bubble
text
:type font_files: list
:param text_dataset: A dask dataframe of text to
pick to render within speech bubble
:type text_dataset: pandas.dataframe
:param speech_bubble_files: list of base speech bubble
template files
:type speech_bubble_files: list
:param speech_bubble_tags: a list of speech bubble
writing area tags by filename
:type speech_bubble_tags: list
:return: Created Page with all the bells and whistles
:rtype: Page
"""
# Select page type
page_type = np.random.choice(
list(cfg.vertical_horizontal_ratios.keys()),
p=list(cfg.vertical_horizontal_ratios.values())
)
# Select number of panels on the page
# between 1 and 8
number_of_panels = np.random.choice(
list(cfg.num_pages_ratios.keys()),
p=list(cfg.num_pages_ratios.values())
)
page = get_base_panels(number_of_panels, page_type)
if np.random.random() < cfg.panel_transform_chance:
page = add_transforms(page)
page = shrink_panels(page)
page = populate_panels(page,
image_dir,
image_dir_path,
font_files,
text_dataset,
speech_bubble_files,
speech_bubble_tags
)
if np.random.random() < cfg.panel_removal_chance:
page = remove_panel(page)
if number_of_panels == 1:
page = add_background(page, image_dir, image_dir_path)
else:
if np.random.random() < cfg.background_add_chance:
page = add_background(page, image_dir, image_dir_path)
return page | 45499abf5374c8eaaa55a03f8ef0bb7fca6e18f5 | 13,194 |
from collections import defaultdict


def __merge_results(
result_list: tp.List[tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[CVE, CWE]]]]]
) -> tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[CVE, CWE]]]]:
"""
Merge a list of results into one dictionary.
Args:
result_list: a list of ``commit -> cve`` maps to be merged
Return:
        the merged dictionary mapping each commit to its set of unique CVEs
        and its set of unique CWEs
"""
results: tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[
CVE, CWE]]]] = defaultdict(lambda: defaultdict(set))
for unmerged in result_list:
for entry in unmerged.keys():
results[entry]['cve'].update(unmerged[entry]['cve'])
results[entry]['cwe'].update(unmerged[entry]['cwe'])
return results | 685ce8e22483fc1dfe423763367857db22065a3c | 13,195 |
import numpy as np
from scipy.spatial import ConvexHull


def compactness(xyz):
"""
Input: xyz
Output: compactness (V^2/SA^3) of convex hull of 3D points.
"""
xyz = np.array(xyz)
ch = ConvexHull(xyz, qhull_options="QJ")
return ch.volume**2/ch.area**3 | b1185e8aaec5962e39866594aeccdd5d5ae2807d | 13,196 |
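# Sanity-check sketch for compactness above: for the 8 corners of a unit cube
# the hull has volume 1 and surface area 6, so V^2/SA^3 is 1/216 ~= 0.00463
# (the 'QJ' joggling perturbs the result very slightly).
unit_cube = [[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)]
print(compactness(unit_cube))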
import os
def is_posix():
"""Convenience function that tests different information sources to verify
whether the operating system is POSIX compliant.
.. note::
No assumption is made reading the POSIX level compliance.
    :return: True if the operating system is POSIX compliant, False otherwise.
:rtype: bool
"""
return os.name in OS_POSIX_NAMES | 7f40b0b6d785b63b0c7af6ac78272a4823994681 | 13,197 |
def guide(batch, z_dim, hidden_dim, out_dim=None, num_obs_total=None):
"""Defines the probabilistic guide for z (variational approximation to posterior): q(z) ~ p(z|q)
:param batch: a batch of observations
:return: (named) sampled z from the variational (guide) distribution q(z)
"""
assert(jnp.ndim(batch) == 3)
batch_size = jnp.shape(batch)[0]
batch = jnp.reshape(batch, (batch_size, -1)) # squash each data item into a one-dimensional array (preserving only the batch size on the first axis)
out_dim = jnp.shape(batch)[1]
num_obs_total = batch_size if num_obs_total is None else num_obs_total
encode = numpyro.module('encoder', encoder(hidden_dim, z_dim), (batch_size, out_dim))
with plate('batch', num_obs_total, batch_size):
z_loc, z_std = encode(batch) # obtain mean and variance for q(z) ~ p(z|x) from encoder
z = sample('z', dist.Normal(z_loc, z_std).to_event(1)) # z follows q(z)
return z | 1198ee7b12bed9118d7bb865ed45ff10eef917d4 | 13,198 |
def trip2str(trip):
""" Pretty-printing. """
header = "{} {} {} - {}:".format(trip['departureTime'],
trip['departureDate'], trip['origin'],
trip['destination'])
output = [header]
for subtrip in trip['trip']:
originstr = u'{}....{}'.format(subtrip['departureTime'],
subtrip['origin'])
output.append(originstr)
for subsubtrip in subtrip['trip']:
t = subsubtrip['arrivalTime']
d = subsubtrip['stop']
intermediatestr = t+u'.'*8+d
output.append(intermediatestr)
destinationstr = u'{}....{}'.format(subtrip['arrivalTime'],
subtrip['destination'])
output.append(destinationstr)
return "\n".join(output) | 67daf3feb6b81d40d3102a8c610b20e68571b131 | 13,199 |