content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import typing as types  # Union, Tuple and get_args used below come from `typing`, not `types`
def parse_container_args(field_type: type) -> types.Union[ParamType, types.Tuple[ParamType]]:
"""Parses the arguments inside a container type (lists, tuples and so on).
Args:
field_type (type): pydantic field type
Returns:
types.Union[ParamType, types.Tuple[ParamType]]: single click-compatible type or a tuple
"""
assert is_container(field_type), "Field type is not a container"
args = types.get_args(field_type)
# Early out for untyped containers: standard lists, tuples, List[Any]
# Use strings when the type is unknown, avoid click's type guessing
if len(args) == 0:
return str
# Early out for homogenous containers: Tuple[int], List[str]
if len(args) == 1:
return parse_single_arg(args[0])
# Early out for homogenous tuples of indefinite length: Tuple[int, ...]
if len(args) == 2 and args[1] is Ellipsis:
return parse_single_arg(args[0])
# Then deal with fixed-length containers: Tuple[str, int, int]
return tuple(parse_single_arg(arg) for arg in args) | 925ceab7886f47c41ed5b3189693db2a28c37db1 | 12,200 |
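As a quick illustration of the branches above (a minimal sketch that does not depend on click or pydantic), this is what `typing.get_args` reports for the container shapes the function distinguishes:

```python
import typing

print(typing.get_args(list))                         # () -> untyped container, falls back to str
print(typing.get_args(typing.List[str]))             # (<class 'str'>,) -> homogeneous container
print(typing.get_args(typing.Tuple[int, ...]))       # (<class 'int'>, Ellipsis) -> indefinite-length tuple
print(typing.get_args(typing.Tuple[str, int, int]))  # three args -> fixed-length tuple, one type per slot
```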
def decrypt_from_base64(ciphertext_bs64, decrypt_key: str, iv: str) -> str:
    """Decrypt a base64-encoded ciphertext and return the plaintext string."""
    aes: AES_Turbe = AES_Turbe(decrypt_key, iv)
content_str = aes.decrypt_from_base64(ciphertext_bs64)
return content_str | 67c4e1bb610b4a28d6fc563b910e86554779f1f2 | 12,201 |
def _resize_data(image, mask):
"""Resizes images to smaller dimensions."""
image = tf.image.resize_images(image, [480, 640])
mask = tf.image.resize_images(mask, [480, 640])
return image, mask | 47961541bf903c8b84066766529bf28d268eaa36 | 12,202 |
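`tf.image.resize_images` is the TensorFlow 1.x API; under TensorFlow 2.x the same resize would typically be written as below (a sketch, not part of the original snippet):

```python
import tensorflow as tf

def _resize_data_v2(image, mask):
    """TF2-style sketch of the function above: resize image and mask to 480x640."""
    image = tf.image.resize(image, [480, 640])
    mask = tf.image.resize(mask, [480, 640])
    return image, mask
```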
def dist_Mpc_to_geo(dist):
"""convert distance from Mpc to geometric units (i.e., metres)"""
return dist * Mpc | d023b80da73420f5499be0eb14f4d2e515e54627 | 12,203 |
def upload_to_bucket(file_path, filename):
"""
Upload file to S3 bucket
"""
s3_client = boto3.client('s3')
success = False
try:
response = s3_client.upload_file(file_path, AWS_S3_BUCKET_NAME, filename)
success = True
except ClientError as e:
logger.error('Error at %s', 'boto3.exceptions.ClientError', exc_info=e)
return success | 630937591307d9ec55fe606b2e199b1219139bcb | 12,204 |
import os
def osqueryd_log_parser(osqueryd_logdir=None,
backuplogdir=None,
maxlogfilesizethreshold=None,
logfilethresholdinbytes=None,
backuplogfilescount=None,
enablediskstatslogging=False,
topfile_for_mask=None,
mask_passwords=False):
"""
Parse osquery daemon logs and perform log rotation based on specified parameters
osqueryd_logdir
Directory path where osquery result and snapshot logs would be created
backuplogdir
Directory path where hubble should create log file backups post log rotation
maxlogfilesizethreshold
        Log file size threshold in bytes. If the osquery log file size is greater than this value,
        then logs will only be rotated but not parsed
logfilethresholdinbytes
        Log file size threshold in bytes. If the osquery log file is greater than this value,
then log rotation will be done once logs have been processed
backuplogfilescount
Number of log file backups to keep
enablediskstatslogging
Enable logging of disk usage of /var/log partition. Default is False
topfile_for_mask
This is the location of the top file from which the masking information
will be extracted
mask_passwords
Defaults to False. If set to True, passwords mentioned in the
return object are masked
"""
ret = []
if not osqueryd_logdir:
osqueryd_logdir = __opts__.get('osquerylogpath')
result_logfile = os.path.normpath(os.path.join(osqueryd_logdir, 'osqueryd.results.log'))
snapshot_logfile = os.path.normpath(os.path.join(osqueryd_logdir, 'osqueryd.snapshots.log'))
log.debug("Result log file resolved to: %s", result_logfile)
log.debug("Snapshot log file resolved to: %s", snapshot_logfile)
backuplogdir = backuplogdir or __opts__.get('osquerylog_backupdir')
logfilethresholdinbytes = logfilethresholdinbytes or __opts__.get('osquery_logfile_maxbytes')
maxlogfilesizethreshold = maxlogfilesizethreshold or __opts__.get(
'osquery_logfile_maxbytes_toparse')
backuplogfilescount = backuplogfilescount or __opts__.get('osquery_backuplogs_count')
if os.path.exists(result_logfile):
logfile_offset = _get_file_offset(result_logfile)
event_data = _parse_log(result_logfile,
logfile_offset,
backuplogdir,
logfilethresholdinbytes,
maxlogfilesizethreshold,
backuplogfilescount,
enablediskstatslogging)
if event_data:
ret += event_data
else:
log.warn("Specified osquery result log file doesn't exist: %s", result_logfile)
if os.path.exists(snapshot_logfile):
logfile_offset = _get_file_offset(snapshot_logfile)
event_data = _parse_log(snapshot_logfile,
logfile_offset,
backuplogdir,
logfilethresholdinbytes,
maxlogfilesizethreshold,
backuplogfilescount,
enablediskstatslogging)
if event_data:
ret += event_data
else:
log.warn("Specified osquery snapshot log file doesn't exist: %s", snapshot_logfile)
ret = _update_event_data(ret)
if mask_passwords:
log.info("Perform masking")
_mask_object(ret, topfile_for_mask)
return ret | 96428f3984b688b3a55c5d5b37087fcec97c12f4 | 12,205 |
import os
def get_filenames(data_dir, mode, valid_id, pred_id, overlap_step, patch_size):
"""Returns a list of filenames."""
if mode == 'train':
train_files = [
os.path.join(data_dir, 'subject-%d.tfrecords' % i)
for i in range(1, 11)
if i != valid_id
]
for f in train_files:
assert os.path.isfile(f), \
('Run generate_tfrecord.py to generate training files.')
return train_files
elif mode == 'valid':
valid_file = os.path.join(data_dir,
'subject-%d-valid-%d-patch-%d.tfrecords' % (valid_id, overlap_step, patch_size))
assert os.path.isfile(valid_file), \
('Run generate_tfrecord.py to generate the validation file.')
return [valid_file]
elif mode == 'pred':
pred_file = os.path.join(data_dir,
'subject-%d-pred-%d-patch-%d.tfrecords' % (pred_id, overlap_step, patch_size))
assert os.path.isfile(pred_file), \
('Run generate_tfrecord.py to generate the prediction file.')
return [pred_file] | 255a89254c860d7bbd7941da017e7e015406cf8d | 12,206 |
def downsample_data( data, factor, hdr ):
"""Resample data and update the header appropriately
If factor < 1, this is *upsampling*.
Use this function to just return the data and hdr parts in case you want to do further operations prior to saving.
order=3 appears to crash Python 64-bit on Windows when the image is very large (800x500x500) and the method is trilinear. Order=1 works.
"""
fraction = 1.0 / factor
# ds_data = ndimage.interpolation.zoom(data, zoom=fraction, order=1) # default order=3
# order=3 default
# order=1 for very high-resolution images (default crashes)
# order=0 for nearest neighbour
if len(data.shape) > 3:
print(" Data shape is {0}. Only the first three dimensions will be considered! (The output will be 3D: data[:,:,:,0])".format(data.shape))
ds_data = ndimage.interpolation.zoom(data[:,:,:,0], zoom=fraction, order=0)
else:
ds_data = ndimage.interpolation.zoom(data, zoom=fraction, order=0)
ds_hdr = copy.deepcopy(hdr)
ds_hdr.set_data_shape(ds_data.shape)
new_pixdims = hdr.structarr['pixdim'][1:4] * factor
print("Pixdims old: {0}, new: {1}.".format(hdr.structarr['pixdim'][1:4], new_pixdims))
ds_hdr.structarr['pixdim'][1:4] = new_pixdims
sform_old = hdr.get_sform()
    print(sform_old)
resampling_affine = create_affine(trans=[factor,factor,factor], scale=[factor, factor, factor])
# Create the new sform matrix
sform_new = sform_old.dot(resampling_affine)
# Keep the exact-same translation elements
sform_new[0:3,3] = sform_old[0:3,3]
    print(sform_new)
ds_hdr.set_sform(sform_new)
# hdr_new.set_sform(np.eye(4))
# hdr_new['srow_x'][0] = hdr_new['pixdim'][1]
# hdr_new['srow_y'][1] = hdr_new['pixdim'][2]
# hdr_new['srow_z'][2] = hdr_new['pixdim'][3]
# hdr_new.get_sform()
# hdr_new['srow_x'][3] = hdr_new['pixdim'][1]
# hdr_new['srow_y'][3] = hdr_new['pixdim'][2]
# hdr_new['srow_z'][3] = hdr_new['pixdim'][3]
return ds_data, ds_hdr
# End of downsample_data() definition | 889e698531255b862fe407d5ce64a3ed54b44c44 | 12,207 |
def check_if_present(driver: webdriver.Firefox, selector: str):
""" Checks if element is present on page by css selector """
return bool(driver.find_elements_by_css_selector(selector)) | 9cc4ebf92908ed8ef392cc20697734d553eb6db0 | 12,208 |
from glob import glob
import os
def list_pdf_paths(pdf_folder):
"""
list of pdf paths in pdf folder
"""
return glob(os.path.join(pdf_folder, '*', '*', '*.pdf')) | 793629d3fbbc9c072f1b1bedbd374e10a1d88781 | 12,209 |
def cmi(x, y, z, k=3, base=2):
"""Mutual information of x and y, conditioned on z
x,y,z should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x)==len(y), 'Lists should have same length.'
assert k <= len(x) - 1, 'Set k smaller than num samples - 1.'
intens = 1e-10 # Small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
y = [list(p + intens*nr.rand(len(y[0]))) for p in y]
z = [list(p + intens*nr.rand(len(z[0]))) for p in z]
points = zip2(x,y,z)
# Find nearest neighbors in joint space, p=inf means max-norm.
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a = avgdigamma(zip2(x,z), dvec)
b = avgdigamma(zip2(y,z), dvec)
c = avgdigamma(z,dvec)
d = digamma(k)
return (-a-b+c+d) / log(base) | b79a39dbe98202fedf4f572148cdce81c6608e3c | 12,210 |
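For reference, the returned quantity matches the k-nearest-neighbour conditional mutual information estimator in the Frenzel–Pompe form (assuming `avgdigamma` averages the digamma of the neighbour counts within each `dvec` radius; the exact +1 convention lives inside that helper):

$$\hat I(X;Y\mid Z) = \frac{1}{\log(\mathrm{base})}\Big(\psi(k) - \big\langle \psi(n_{xz}) + \psi(n_{yz}) - \psi(n_{z})\big\rangle\Big)$$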
def connected_plate():
"""Detects which plate from the PMA is connected to the device.
Returns:
FirmwareDeviceID: device ID of the connected plate. None if not detected
"""
for plate_id in (
FirmwareDeviceID.pt4_foundation_plate,
FirmwareDeviceID.pt4_expansion_plate,
):
status = __get_fw_device_status(plate_id)
if status.get("connected") is True:
return plate_id
return None | 4db471df81d63aa3c60f24b50fc25cef452db2d7 | 12,211 |
def islogin(_a=None):
"""
    Check whether the user is already logged in; return the request token if so, otherwise False.
"""
if _a is None:
global a
else:
a = _a
x=a.get(DOMAIN+"/apps/files/desktop/own",o=True)
t=a.b.find("input",{"id":"request_token"})
if t is None:
t = a.b.find("input",{"id":"oc_requesttoken"})
if t is None:
return False
else:
return t["value"] | 235a4662ecca8c496b83aec5bc0408bbd8ae45ec | 12,212 |
def discrete_coons_patch(ab, bc, dc, ad):
"""Creates a coons patch from a set of four or three boundary
polylines (ab, bc, dc, ad).
Parameters
----------
ab : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the first polyline.
bc : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the second polyline.
dc : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the third polyline.
ad : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the fourth polyline.
Returns
-------
list[[float, float, float]]
The points of the coons patch.
list[list[int]]
List of faces, with every face a list of indices into the point list.
Notes
-----
The vertices of the polylines are assumed to be in the following order::
b -----> c
^ ^
| |
| |
a -----> d
To create a triangular patch, one of the input polylines should be None.
(Warning! This will result in duplicate vertices.)
For more information see [1]_ and [2]_.
References
----------
.. [1] Wikipedia. *Coons patch*.
Available at: https://en.wikipedia.org/wiki/Coons_patch.
.. [2] Robert Ferreol. *Patch de Coons*.
Available at: https://www.mathcurve.com/surfaces/patchcoons/patchcoons.shtml
Examples
--------
>>>
"""
if not ab:
ab = [ad[0]] * len(dc)
if not bc:
bc = [ab[-1]] * len(ad)
if not dc:
dc = [bc[-1]] * len(ab)
if not ad:
ad = [dc[0]] * len(bc)
n = len(ab)
m = len(bc)
n_norm = normalize_values(range(n))
m_norm = normalize_values(range(m))
array = [[0] * m for i in range(n)]
for i, ki in enumerate(n_norm):
for j, kj in enumerate(m_norm):
# first function: linear interpolation of first two opposite curves
lin_interp_ab_dc = add_vectors(scale_vector(ab[i], (1 - kj)), scale_vector(dc[i], kj))
# second function: linear interpolation of other two opposite curves
lin_interp_bc_ad = add_vectors(scale_vector(ad[j], (1 - ki)), scale_vector(bc[j], ki))
# third function: linear interpolation of four corners resulting a hypar
a = scale_vector(ab[0], (1 - ki) * (1 - kj))
b = scale_vector(bc[0], ki * (1 - kj))
c = scale_vector(dc[-1], ki * kj)
d = scale_vector(ad[-1], (1 - ki) * kj)
lin_interp_a_b_c_d = sum_vectors([a, b, c, d])
# coons patch = first + second - third functions
array[i][j] = subtract_vectors(add_vectors(lin_interp_ab_dc, lin_interp_bc_ad), lin_interp_a_b_c_d)
# create vertex list
vertices = []
for i in range(n):
vertices += array[i]
# create face vertex list
faces = []
for i in range(n - 1):
for j in range(m - 1):
faces.append([i * m + j, i * m + j + 1, (i + 1) * m + j + 1, (i + 1) * m + j])
return vertices, faces | 19393f8eea5164e0f6cf89463dd3d68329c395d5 | 12,213 |
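Written out, the inner loop above is the standard bilinearly blended Coons construction. With parameters $u_i, v_j \in [0,1]$ and corners $a = ab_0$, $b = bc_0$, $c = dc_{-1}$, $d = ad_{-1}$, each surface point is:

$$S(u_i, v_j) = \big[(1-v_j)\,ab_i + v_j\,dc_i\big] + \big[(1-u_i)\,ad_j + u_i\,bc_j\big] - \big[(1-u_i)(1-v_j)\,a + u_i(1-v_j)\,b + u_i v_j\,c + (1-u_i) v_j\,d\big]$$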
def w(shape, stddev=0.01):
"""
@return A weight layer with the given shape and standard deviation. Initialized with a
truncated normal distribution.
"""
return tf.Variable(tf.truncated_normal(shape, stddev=stddev)) | fd3d0bb6fb5565ce4ff5b4aafb80eccf711072db | 12,214 |
def lr_mult(alpha):
"""Decreases the learning rate update by a factor of alpha."""
@tf.custom_gradient
def _lr_mult(x):
def grad(dy):
return dy * alpha * tf.ones_like(x)
return x, grad
return _lr_mult | b5ab1c2b01025aee74c5f4c92b81eabf08c20de1 | 12,215 |
import PIL
import io
def get_png_string(mask_array):
"""Builds PNG string from mask array.
Args:
mask_array (HxW): Mask array to generate PNG string from.
Returns: String of mask encoded as a PNG.
"""
# Convert the new mask back to an image.
image = PIL.Image.fromarray(mask_array.astype('uint8')).convert('RGB')
# Save the new image to a PNG byte string.
byte_buffer = io.BytesIO()
image.save(byte_buffer, format='png')
byte_buffer.seek(0)
return byte_buffer.read() | f44dd5417cb5250587926da939b5923921c62ead | 12,216 |
def rotate90ccw(v):
"""Rotate 2d vector 90 degrees counter clockwise
"""
return (-(v[1]), v[0]) | cde43efd01e9fff3002623437f99e521790e33f2 | 12,217 |
def GetListOfCellTestPointsNearestListOfPointsV5(inInputFilter, pointList):
"""for each point in the list, find the cell test point (e.g. center of
cell bounding box) which is nearest the test point. Use MPI to work
in parallel"""
thisProcessNearestCellPointList, thisProcDistSqrdList = \
GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
nearestCellList, distanceList = UseMpiToGetGlobalCellPointsClosestV5(
inInputFilter, thisProcessNearestCellPointList, thisProcDistSqrdList)
return nearestCellList | c4dc0fcdb9d85dbc2ce1654743284400ca11e3d6 | 12,218 |
def _BuildBaseMTTCmd(args, host):
"""Build base MTT cmd."""
remote_mtt_binary = _REMOTE_MTT_BINARY_FORMAT % host.context.user
remote_cmd = [remote_mtt_binary]
if args.very_verbose:
remote_cmd += ['-vv']
elif args.verbose:
remote_cmd += ['-v']
# We copy the mtt binary inside mtt_lab to remote host,
# there is not need to update the mtt binary on the remote host.
remote_cmd += ['--no_check_update']
return remote_cmd | 7506a39b79c81bc3b279280e69820ccdbb8ed664 | 12,219 |
def get_service(credentials=get_credentials()):
"""Gets GMail service, given credentials"""
return apiclient.discovery.build("gmail", "v1", credentials=credentials) | 7f344f07cc2d78014381bd449d6655658f2e4881 | 12,220 |
def edb_client_server_info(edb: ElectrolyteDB) -> dict:
"""
Perform an operation that ensures that the `edb` fixture has a client that:
- Is able to connect to the server (non-mock), or
- Is a "convincing fake" (mock), so that test functions using `edb` can expect a realistic behavior
Additionally, if this fixture is dispatched before "real" users (here this is done by using a `test__` prefix with two underscores),
it avoids any first-use inconsistencies, such as e.g. the time spent to wait for a connection
being counted as part of the duration of the first test for which the `edb` fixture is instantiated.
"""
return edb._client.server_info() | 83e9d4bb7bf76e7ed5a73c69a4a2a617227526f8 | 12,221 |
def convert_to_int_list(dataframe: pd.Series) -> "list[list[int]]":
    """
    Takes a Series whose rows are string representations of lists of ints
    (e.g. "[1, 2, 3]") and converts them into a list of lists of ints
    """
result_list = []
for row in dataframe:
result_list.append([int(x) for x in row[1:-1].split(", ")])
return result_list | 0d0ed69db3af04a21a65d472c53580d8f88f50f0 | 12,222 |
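A minimal usage sketch (assuming pandas is imported as `pd`), where each row holds a bracketed, comma-separated string of integers:

```python
import pandas as pd

rows = pd.Series(["[1, 2, 3]", "[4, 5]"])
print(convert_to_int_list(rows))  # [[1, 2, 3], [4, 5]]
```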
def get_prism_daily_single(variable,
date,
return_path=False,
**kwargs):
"""Download data for a single day
Parameters
----------
variable : str
Either tmean, tmax, tmin, or ppt
date : str
The date to download in the format YYYY-MM-DD
dest_path : str, optional
Folder to download to, defaults to the current working directory.
return_path : bool, optional
Returns the full path to the final bil file, default False
keep_zip : bool, optional
Keeps the originally downloaded zip file, default True
"""
daily = PrismDaily(variable=variable,
min_date=date,
max_date=date,
**kwargs)
daily.download()
daily.close()
if return_path:
return daily._local_bil_filename(daily.dates[0]) | 5fe14da937452e040e2fcf7d38d04d1068f2bff8 | 12,223 |
import base64
import io
def handle_filestreams(list_of_contents, list_of_names):
"""
Args:
list_of_contents:
list_of_names:
"""
if len(list_of_contents) == 1:
content = list_of_contents[0]
filename = list_of_names[0]
else:
raise Exception("Multiple files not supported") # TODO
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string)
if 'csv' in filename: # Assume that the user uploaded a CSV file
file = io.StringIO(decoded.decode('utf-8'))
elif 'xls' in filename: # Assume that the user uploaded an excel file
file = io.BytesIO(decoded)
elif 'tsv' in filename: # Assume that the user uploaded an tsv file
file = io.StringIO(decoded.decode('utf-8'))
elif 'txt' in filename: # Assume that the user uploaded either a tsv or csv file
file = io.StringIO(decoded.decode('utf-8'))
else:
raise IOError("Unable to read table file.")
return file | 3ab582bbf6d709d0ebcdb6f71825734f43e8bc86 | 12,224 |
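A rough usage sketch, assuming the content strings follow the `data:<mime>;base64,<payload>` convention used by Dash upload components (that format is an assumption here, not stated in the snippet):

```python
import base64
import pandas as pd

payload = base64.b64encode(b"a,b\n1,2\n").decode()
content = "data:text/csv;base64," + payload  # assumed upload-style content string
file = handle_filestreams([content], ["example.csv"])
print(pd.read_csv(file))  # parses the decoded CSV
```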
def grid_count(grid, shape=None, interpolation='linear', bound='zero',
extrapolate=False):
"""Splatting weights with respect to a deformation field (pull adjoint).
Notes
-----
{interpolation}
{bound}
Parameters
----------
grid : ([batch], *inshape, dim) tensor
Transformation field.
shape : sequence[int], default=inshape
Output shape
interpolation : int or sequence[int], default=1
Interpolation order.
bound : BoundType, or sequence[BoundType], default='zero'
Boundary conditions.
    extrapolate : bool or int, default=False
        Extrapolate out-of-bound data.
Returns
-------
output : ([batch], 1, *shape) tensor
        Splatting weights.
"""
dim = grid.shape[-1]
grid_no_batch = grid.dim() == dim + 1
if grid_no_batch:
grid = grid[None]
if shape is None:
shape = tuple(grid.shape[1:-1])
out = GridCount.apply(grid, shape, interpolation, bound, extrapolate)
if grid_no_batch:
out = out[0]
return out | 620713f2e234d1a2195fdebba5215a2c1f2493a3 | 12,225 |
def check_spf_record(lookup, spf_record):
"""
Check that all parts of lookup appear somewhere in the given SPF record, resolving
include: directives recursively
"""
not_found_lookup_parts = set(lookup.split(" "))
_check_spf_record(not_found_lookup_parts, spf_record, 0)
return not not_found_lookup_parts | 831c6d07a91484ce6b96bdc507a8f31034193590 | 12,226 |
import pytz
def local_tz2() -> pytz.BaseTzInfo:
"""
Second timezone for the second user
"""
return pytz.timezone("America/Los_Angeles") | d841f3ea06334540b8dca6fd2c2a2e823227fa37 | 12,227 |
def get_chemistry_info(sam_header, input_filenames, fail_on_missing=False):
"""Get chemistry triple information for movies referenced in a SAM
header.
Args:
sam_header: a pysam.Samfile.header, which is a multi-level dictionary.
Movie names are read from RG tags in this header.
input_filenames: a list of bas, bax, or fofn filenames.
fail_on_missing: if True, raise an exception if the chemistry
information for a movie in the header cannot be
found. If False, just log a warning.
Returns:
a list of strings that can be written as DS tags to RG entries in the
header of a new SAM or BAM file. For example,
['BINDINGKIT:xxxx;SEQUENCINGKIT:yyyy;SOFTWAREVERSION:2.0']
Raises:
ChemistryLoadingException if chemistry information cannot be found
for a movie in the header and fail_on_missing is True.
"""
# First get the full list of ba[sx] files, reading through any fofn or xml
# inputs
bas_filenames = []
for filename in input_filenames:
bas_filenames.extend(FofnIO.enumeratePulseFiles(filename))
# Then get the chemistry triple for each movie in the list of bas files
triple_dict = {}
for bas_filename in bas_filenames:
bas_file = BasH5IO.BasH5Reader(bas_filename)
movie_name = bas_file.movieName
chem_triple = bas_file.chemistryBarcodeTriple
triple_dict[movie_name] = chem_triple
# Finally, find the movie names that appear in the header and create CO
# lines with the chemistry triple
if 'RG' not in sam_header:
return []
rgds_entries = {}
for rg_entry in sam_header['RG']:
rg_id = rg_entry['ID']
rg_movie_name = rg_entry[MOVIENAME_TAG]
try:
rg_chem_triple = triple_dict[rg_movie_name]
rgds_entries[rg_id] = rg_chem_triple
except KeyError:
err_msg = ("Cannot find chemistry information for movie {m}."
.format(m=rg_movie_name))
if fail_on_missing:
raise ChemistryLoadingException(err_msg)
else:
log.warning(err_msg)
rgds_strings = format_rgds_entries(rgds_entries)
return rgds_strings | 4bfdd7f09061650c0e010f71cd00cbd44481c40f | 12,228 |
import warnings
import http
def json_catalog(request, domain='djangojs', packages=None):
"""
Return the selected language catalog as a JSON object.
Receives the same parameters as javascript_catalog(), but returns
a response with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
warnings.warn(
"The json_catalog() view is deprecated in favor of the "
"JSONCatalog view.", RemovedInDjango20Warning, stacklevel=2
)
locale = _get_locale(request)
packages = _parse_packages(packages)
catalog, plural = get_javascript_catalog(locale, domain, packages)
data = {
'catalog': catalog,
'formats': get_formats(),
'plural': plural,
}
return http.JsonResponse(data) | f2ac449d11299471184f0ddaeac87d543e90b7a3 | 12,229 |
from typing import Dict
from typing import Any
def read_global_config() -> Dict[Text, Any]:
"""Read global Rasa configuration."""
# noinspection PyBroadException
try:
return rasa.utils.io.read_yaml_file(GLOBAL_USER_CONFIG_PATH)
except Exception:
# if things go south we pretend there is no config
return {} | 0287aa03a07b7ce5218f56237cd49d9ea18f8d5f | 12,230 |
def get_uuid_hex(digest_size: int = 10) -> str:
"""Generate hex of uuid4 with the defined size."""
return blake2b(uuid4().bytes, digest_size=digest_size).hexdigest() | 4ec853740b7f17bbf7cb90fd5e25c9d7719440c8 | 12,231 |
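`blake2b` and `uuid4` here come from the standard library (`hashlib` and `uuid`); a quick usage sketch of the same expression:

```python
from hashlib import blake2b
from uuid import uuid4

token = blake2b(uuid4().bytes, digest_size=10).hexdigest()
print(token, len(token))  # 20 hex characters: two per digest byte
```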
def crc16(data):
"""CRC-16-CCITT computation with LSB-first and inversion."""
crc = 0xffff
for byte in data:
crc ^= byte
for bits in range(8):
if crc & 1:
crc = (crc >> 1) ^ 0x8408
else:
crc >>= 1
return crc ^ 0xffff | 2560f53c1f2b597d556a0b63462ef56f0c972db2 | 12,232 |
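A small usage sketch. With this reflected polynomial (0x8408), 0xFFFF initial value and final inversion, the routine corresponds to the CRC-16/X-25 parameterization; the commonly quoted check value for b"123456789" is 0x906E (stated as a cross-check, not taken from the snippet):

```python
assert crc16(b"123456789") == 0x906E  # expected for the CRC-16/X-25 variant
```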
def _unlink_f(filename):
""" Call os.unlink, but don't die if the file isn't there. This is the main
difference between "rm -f" and plain "rm". """
try:
os.unlink(filename)
return True
    except OSError as e:
if e.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES,errno.EROFS):
raise
return False | f0014e7ab0dfb6db519198f1eb052d930542a63f | 12,233 |
def get_member_expr_fullname(expr: MemberExpr) -> str:
"""Return the qualified name representation of a member expression.
Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
argument cannot be represented in this form.
"""
if isinstance(expr.expr, NameExpr):
initial = expr.expr.name
elif isinstance(expr.expr, MemberExpr):
initial = get_member_expr_fullname(expr.expr)
else:
return None
return '{}.{}'.format(initial, expr.name) | 2859751a977f028f0984e86422f31f388314414a | 12,234 |
def _read_dino_waterlvl_metadata(f, line):
"""read dino waterlevel metadata
Parameters
----------
f : text wrapper
line : str
line with meta dictionary keys
meta_dic : dict (optional)
dictionary with metadata
Returns
-------
meta : dict
dictionary with metadata
"""
meta_keys = line.strip().split(",")
meta_values = f.readline().strip().split(",")
meta = {}
for key, value in zip(meta_keys, meta_values):
key = key.strip()
if key in ["X-coordinaat", "Y-coordinaat"]:
if key == "X-coordinaat":
meta["x"] = float(value)
elif key == "Y-coordinaat":
meta["y"] = float(value)
elif key == "Locatie":
meta["locatie"] = value
meta["name"] = value
return meta | 949535f4fc677a7d0afc70a76e377ccefcc8943f | 12,235 |
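A hypothetical example of the expected input (the exact DINO CSV layout is an assumption), showing how the passed-in header line pairs with the next line read from the file:

```python
import io

f = io.StringIO("B27D0001,150000.0,450000.0\n")
line = "Locatie,X-coordinaat,Y-coordinaat"
meta = _read_dino_waterlvl_metadata(f, line)
print(meta)  # {'locatie': 'B27D0001', 'name': 'B27D0001', 'x': 150000.0, 'y': 450000.0}
```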
def async_get_url(
hass: HomeAssistant,
*,
require_ssl: bool = False,
require_standard_port: bool = False,
allow_internal: bool = True,
allow_external: bool = True,
allow_cloud: bool = True,
allow_ip: bool = True,
prefer_external: bool = False,
prefer_cloud: bool = False,
) -> str:
"""Get a URL to this instance."""
order = [TYPE_URL_INTERNAL, TYPE_URL_EXTERNAL]
if prefer_external:
order.reverse()
# Try finding an URL in the order specified
for url_type in order:
if allow_internal and url_type == TYPE_URL_INTERNAL:
try:
return _async_get_internal_url(
hass,
allow_ip=allow_ip,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
if allow_external and url_type == TYPE_URL_EXTERNAL:
try:
return _async_get_external_url(
hass,
allow_cloud=allow_cloud,
allow_ip=allow_ip,
prefer_cloud=prefer_cloud,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
# We have to be honest now, we have no viable option available
raise NoURLAvailableError | 1d4e13a8fa5d26bbc9132e85937a24d320136112 | 12,236 |
import os
def validate_local_model(models_path: str, model_id: str) -> bool:
"""Validate local model by id.
Args:
models_path: Path to the models folder.
model_id: Model id.
"""
model_correct = True
model_path = os.path.join(models_path, model_id)
if not os.path.exists(model_path):
model_correct = False
else:
try:
_ = get_model_type_name(models_path, model_id)
except FileNotFoundError:
model_correct = False
return model_correct | 9e081b5175ceb46d2caac57d0342f4660946d18e | 12,237 |
def strip(prefix: Seq, seq: Seq, partial=False, cmp=NOT_GIVEN) -> Iter:
"""
If seq starts with the same elements as in prefix, remove them from
result.
Args:
prefix:
Prefix sequence to possibly removed from seq.
seq:
Sequence of input elements.
partial:
If True, remove partial matches with prefix.
cmp:
If given, uses as a comparation function between elements of prefix
and sequence. It removes elements that cmp(x, y) returns True.
Examples:
>>> ''.join(strip("ab", "abcd"))
'cd'
>>> strip(sk.repeat(3), range(6), partial=True, cmp=(X > Y))
sk.iter([3, 4, 5])
"""
if partial:
cmp = NOT_GIVEN.resolve(cmp, op.eq)
return Iter(_strip_partial(iter(prefix), iter(seq), cmp=cmp))
elif cmp is NOT_GIVEN:
return Iter(_strip_full(tuple(prefix), iter(seq)))
else:
return Iter(_strip_full_cmp(tuple(prefix), iter(seq), cmp)) | 8d2a9a62157e3b55adcc976d5c7693cb67513c92 | 12,238 |
def _read_unicode_table(instream, separator, startseq, encoding):
"""Read the Unicode table in a PSF2 file."""
raw_table = instream.read()
entries = raw_table.split(separator)[:-1]
table = []
for point, entry in enumerate(entries):
split = entry.split(startseq)
code_points = [_seq.decode(encoding) for _seq in split]
# first entry is separate code points, following entries (if any) are sequences
table.append([_c for _c in code_points[0]] + code_points[1:])
return table | e27e59b57d10cb20dd4ddc832c65cb8802984d44 | 12,239 |
from click.testing import CliRunner
def runner():
"""Provides a command-line test runner."""
return CliRunner() | 82b75c8dcaa0105c623a1caea5b459c97e3e18fd | 12,240 |
def matsubara_exponents(coup_strength, bath_broad, bath_freq, beta, N_exp):
"""
Calculates the exponentials for the correlation function for matsubara
terms. (t>=0)
Parameters
----------
coup_strength: float
The coupling strength parameter.
bath_broad: float
A parameter characterizing the FWHM of the spectral density, i.e.,
the cavity broadening.
bath_freq: float
The cavity frequency.
beta: float
The inverse temperature.
N_exp: int
The number of exponents to consider in the sum.
Returns
-------
ck: ndarray
A 1D array with the prefactors for the exponentials
vk: ndarray
A 1D array with the frequencies
"""
lam = coup_strength
gamma = bath_broad
w0 = bath_freq
N_exp = N_exp
omega = np.sqrt(w0 ** 2 - (gamma / 2) ** 2)
a = omega + 1j * gamma / 2.0
aa = np.conjugate(a)
coeff = (-4 * gamma * lam ** 2 / np.pi) * ((np.pi / beta) ** 2)
vk = np.array([-2 * np.pi * n / (beta) for n in range(1, N_exp)])
ck = np.array(
[
n
/ (
(a ** 2 + (2 * np.pi * n / beta) ** 2)
* (aa ** 2 + (2 * np.pi * n / beta) ** 2)
)
for n in range(1, N_exp)
]
)
return coeff * ck, vk | 4d6a1691234f12a5cbdec13b2520891c0f30eb77 | 12,241 |
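Transcribing the arrays above into a formula (a restatement of the code, with $\nu_n = 2\pi n/\beta$, $a = \Omega + i\gamma/2$, $\bar a$ its conjugate, and $\Omega = \sqrt{\omega_0^2 - \gamma^2/4}$), the returned prefactors and frequencies are:

$$c_n = -\frac{4\gamma\lambda^2}{\pi}\left(\frac{\pi}{\beta}\right)^{2} \frac{n}{\big(a^2 + \nu_n^2\big)\big(\bar a^{\,2} + \nu_n^2\big)}, \qquad v_n = -\nu_n, \qquad n = 1,\dots,N_{\mathrm{exp}}-1$$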
def reverse(array):
"""Return `array` in reverse order.
Args:
array (list|string): Object to process.
Returns:
list|string: Reverse of object.
Example:
>>> reverse([1, 2, 3, 4])
[4, 3, 2, 1]
.. versionadded:: 2.2.0
"""
# NOTE: Using this method to reverse object since it works for both lists
# and strings.
return array[::-1] | 5eb096d043d051d4456e08fae91fb52048686992 | 12,242 |
def compute_segregation_profile(gdf,
groups=None,
distances=None,
network=None,
decay='linear',
function='triangular',
precompute=True):
"""Compute multiscalar segregation profile.
This function calculates several Spatial Information Theory indices with
increasing distance parameters.
Parameters
----------
gdf : geopandas.GeoDataFrame
geodataframe with rows as observations and columns as population
variables. Note that if using a network distance, the coordinate
system for this gdf should be 4326. If using euclidian distance,
this must be projected into planar coordinates like state plane or UTM.
groups : list
list of variables .
distances : list
list of floats representing bandwidth distances that define a local
environment.
network : pandana.Network (optional)
A pandana.Network likely created with
`segregation.network.get_osm_network`.
decay : str (optional)
decay type to be used in pandana accessibility calculation (the
default is 'linear').
function: 'str' (optional)
which weighting function should be passed to pysal.lib.weights.Kernel
must be one of: 'triangular','uniform','quadratic','quartic','gaussian'
precompute: bool
Whether the pandana.Network instance should precompute the range
queries.This is true by default, but if you plan to calculate several
segregation profiles using the same network, then you can set this
parameter to `False` to avoid precomputing repeatedly inside the
function
Returns
-------
dict
dictionary with distances as keys and SIT statistics as values
Notes
-----
Based on Sean F. Reardon, Stephen A. Matthews, David O’Sullivan, Barrett A. Lee, Glenn Firebaugh, Chad R. Farrell, & Kendra Bischoff. (2008). The Geographic Scale of Metropolitan Racial Segregation. Demography, 45(3), 489–514. https://doi.org/10.1353/dem.0.0019.
Reference: :cite:`Reardon2008`.
"""
gdf = gdf.copy()
gdf[groups] = gdf[groups].astype(float)
indices = {}
indices[0] = MultiInformationTheory(gdf, groups).statistic
if network:
if not gdf.crs['init'] == 'epsg:4326':
gdf = gdf.to_crs(epsg=4326)
groups2 = ['acc_' + group for group in groups]
if precompute:
maxdist = max(distances)
network.precompute(maxdist)
for distance in distances:
            distance = float(distance)
access = calc_access(gdf,
network,
decay=decay,
variables=groups,
distance=distance,
precompute=False)
sit = MultiInformationTheory(access, groups2)
indices[distance] = sit.statistic
else:
for distance in distances:
w = Kernel.from_dataframe(gdf,
bandwidth=distance,
function=function)
sit = SpatialInformationTheory(gdf, groups, w=w)
indices[distance] = sit.statistic
return indices | 3598d7c72660860330847758fc744fcd1b1f40ce | 12,243 |
def calculate_lbp_pixel(image, x, y):
"""Perform the LBP operator on a given pixel.
Order and format:
32 | 64 | 128
----+-----+-----
16 | 0 | 1
----+-----+-----
8 | 4 | 2
:param image: Input image
:type: numpy.ndarray
:param x: Column pixel of interest
:type: int
    :param y: Row pixel of interest
:type: int
:return: LBP value
:rtype: numpy.ndarray
"""
center = image[x][y]
binary_code = np.empty(8)
binary_code[0] = threshold_pixel(image, center, x, y + 1) # Right
binary_code[1] = threshold_pixel(image, center, x + 1, y + 1) # Bottom Right
binary_code[2] = threshold_pixel(image, center, x + 1, y) # Bottom
binary_code[3] = threshold_pixel(image, center, x + 1, y - 1) # Bottom Left
binary_code[4] = threshold_pixel(image, center, x, y - 1) # Left
binary_code[5] = threshold_pixel(image, center, x - 1, y - 1) # Top Left
binary_code[6] = threshold_pixel(image, center, x - 1, y) # Top
binary_code[7] = threshold_pixel(image, center, x - 1, y + 1) # Top Right
weights = np.array([1, 2, 4, 8, 16, 32, 64, 128])
lbp_value = np.dot(binary_code, weights).astype(np.uint8)
return lbp_value | 14f6bd557355a71379b638e52f21d72ccd30a7cb | 12,244 |
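`threshold_pixel` is referenced but not shown; a minimal sketch of what it is assumed to do (return 1 when the neighbour is at least as bright as the centre, else 0, ignoring out-of-range neighbours):

```python
def threshold_pixel(image, center, x, y):
    """Hypothetical helper: 1 if the neighbour at (x, y) is >= center, else 0."""
    try:
        return 1 if image[x][y] >= center else 0
    except IndexError:
        # note: negative indices wrap in numpy arrays; real code may clamp borders instead
        return 0
```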
def test_gradient_sparse_var():
"""
https://www.tensorflow.org/beta/guide/effective_tf2
"""
target = tf.constant([[1., 0., 0.], [1., 0., 0.]])
v = tf.Variable([0.5, 0.5])
x = tx.Lambda([],
fn=lambda _: tf.SparseTensor([[0, 0], [1, 1]], v, [2, 3]),
n_units=3,
var_list=v)
assert isinstance(x(), tf.SparseTensor)
assert len(x.trainable_variables) == 1
y = tx.Linear(x, n_units=3)
# a graph without inputs needs to have missing inputs declared
# otherwise it will try to add the inputs detected to inputs
graph = tx.Graph.build(inputs=None,
outputs=y)
fn = graph.as_function()
@tf.function
def loss(labels):
return tf.reduce_mean(tf.pow(labels - fn(), 2))
with tf.GradientTape() as tape:
loss_val = loss(target)
assert tx.same_shape(tape.gradient(loss_val, v), v.value()) | e43a84a052313fecd11eca60a027f00385cd252f | 12,245 |
def get_setup_and_moves(sgf_game, board=None):
"""Return the initial setup and the following moves from an Sgf_game.
Returns a pair (board, plays)
board -- boards.Board
plays -- list of pairs (colour, move)
moves are (row, col), or None for a pass.
The board represents the position described by AB and/or AW properties
in the root node.
The moves are from the game's 'leftmost' variation.
Raises ValueError if this position isn't legal.
Raises ValueError if there are any AB/AW/AE properties after the root
node.
Doesn't check whether the moves are legal.
If the optional 'board' parameter is provided, it must be an empty board of
the right size; the same object will be returned.
"""
size = sgf_game.get_size()
if board is None:
board = boards.Board(size)
else:
if board.side != size:
raise ValueError("wrong board size, must be %d" % size)
if not board.is_empty():
raise ValueError("board not empty")
root = sgf_game.get_root()
nodes = sgf_game.main_sequence_iter()
ab, aw, ae = root.get_setup_stones()
if ab or aw:
is_legal = board.apply_setup(ab, aw, ae)
if not is_legal:
raise ValueError("setup position not legal")
colour, raw = root.get_raw_move()
if colour is not None:
raise ValueError("mixed setup and moves in root node")
    next(nodes)
moves = []
for node in nodes:
if node.has_setup_stones():
raise ValueError("setup properties after the root node")
colour, raw = node.get_raw_move()
if colour is not None:
moves.append((colour, sgf_properties.interpret_go_point(raw, size)))
return board, moves | a933c067baa49d8e6c7309f7762298244a192d2e | 12,246 |
def help_text_metadata(label=None, description=None, example=None):
"""
Standard interface to help specify the required metadata fields for helptext to
work correctly for a model.
:param str label: Alternative name for the model.
:param str description: Long description of the model.
:param example: A concrete example usage of the model.
:return dict: Dictionary of the help text metadata
"""
return {
'label': label,
'description': description,
'example': example
} | a1fb9c9a9419fe7ce60ed77bc6fadc97ed4523f8 | 12,247 |
def conv1d_stack(sequences, filters, activations, name=None):
"""Convolve a jagged batch of sequences with a stack of filters.
This is equivalent to running several `conv1d`s on each `sequences[i]` and
reassembling the results as a `Jagged`. The padding is always 'SAME'.
Args:
sequences: 4-D `Jagged` tensor.
filters: List of 3-D filters (one filter per layer). Must have odd width.
activations: List of activation functions to apply after each layer, or
None to indicate no activation.
name: Optional name for this operation.
Returns:
`Jagged` convolution results.
Raises:
TypeError: If sequences is not Jagged.
ValueError: If the filters or activations are invalid.
"""
if not isinstance(sequences, Jagged):
raise TypeError('Expected Jagged sequences, got %s' % type(Jagged))
if len(filters) != len(activations):
raise ValueError('Got %d filters != %d activations' %
(len(filters), len(activations)))
if not filters:
return sequences
with tf.name_scope(name, 'jagged_conv1d_stack') as name:
# Compute maximum filter width
filters = [tf.convert_to_tensor(f, name='filter') for f in filters]
width = 0
for filt in filters:
shape = filt.get_shape()
if shape.ndims != 3 or shape[0] is None or shape[0].value % 2 == 0:
raise ValueError('Expected known odd filter width, got shape %s' %
shape)
width = max(width, shape[0].value)
between = width // 2 # Rounds down since width is odd
# Add 'between' zeros between each sequence
flat = sequences.flat
sizes = flatten(sequences.sizes)
size = tf.size(sizes)
flat_shape = tf.shape(flat)
flat_len = flat_shape[0]
indices = (tf.range(flat_len) + repeats(between * tf.range(size), sizes))
padded_len = between * tf.nn.relu(size - 1) + flat_len
flat = tf.unsorted_segment_sum(flat, indices, padded_len)[None]
# Make a mask to reset between portions to zero
if len(filters) > 1:
mask = tf.unsorted_segment_sum(
tf.ones(flat_shape[:1], dtype=flat.dtype), indices, padded_len)
mask = mask[:, None]
# Do each convolution
for i, (filt, activation) in enumerate(zip(filters, activations)):
if i:
flat *= mask
flat = tf.nn.conv1d(flat, filt, stride=1, padding='SAME')
if activation is not None:
flat = activation(flat)
# Extract results and repackage as a Jagged
flat = tf.squeeze(flat, [0])
flat = tf.gather(flat, indices, name=name)
return Jagged(sequences.sizes, flat) | 33248627280ea0c127d710128790e58e0ca5bb9a | 12,248 |
def dh_mnthOfYear(value, pattern):
"""
Helper for decoding a single integer value.
The value should be >=1000, no conversion,
no rounding (used in month of the year)
"""
return dh_noConv(value, pattern, _formatLimit_MonthOfYear[0]) | 5dd1027a0713cb93ea6e3504f1a07517f228a037 | 12,249 |
from typing import Counter
def build_team(datafile, salary_col, position_col, prediction_col, cap=60000, legal_teams=None):
"""
Construct teams from a set of prediction data
:param str datafile: saved prediction data (pickle file)
:param str salary_col: name of salary column
:param str position_col: name of position column
:param str prediction_col: name of prediction column to use
:param list[str] legal_teams: an optional list of legal NBA teams for the game
:return pd.DataFrame: prediction data for chosen team
"""
player_data = pd.read_pickle(datafile)
# Load real names for later use
player_data['name'] = player_data['bref_id'].apply(id2name)
if legal_teams:
player_data = player_data[player_data['Tm'].isin(legal_teams)]
# Ditch any undefined rows for salary / position / prediction as they will break the solver
player_data.dropna(subset=[salary_col, position_col, prediction_col], inplace=True)
# Cast player cost column to integers; this will also break the solver! :)
player_data[salary_col] = player_data[salary_col].astype(int)
# an optimization: speed up computation by only keeping the best-projected two players at each (position, salary).
# this should mean we only keep players we could potentially use
# it is hypothetically true that this could burn us if we get hit by the "too many players from team X" consideration
#grouped_player_data = player_data.groupby([salary_col, position_col], sort=False)
# this actually figures out how many players we need at the given position and keeps only that many at each salary level
#candidates = grouped_player_data.apply(lambda group: group.sort(prediction_col).tail(positions[group[position_col].iloc[0]]))
#
# more detailed, even more aggressive sketchier optimization: remove all players which are strictly worse than others
# (all players for whom two players are better and at least as cheap -- or one for centers. I hard coded that to save time)
# this could burn us pretty hard if we run into a team constraint in the end
def dominators(row):
return len(player_data[(player_data['predicted'] > row['predicted'])
& (player_data['salary'] <= row['salary'])
& (player_data['pos'] == row['pos'])])
player_data['dominators'] = player_data.apply(dominators, axis=1)
candidates = player_data[(player_data['dominators'] == 0) |
((player_data['pos'] != 'C') & (player_data['dominators'] <= 1))]
candidates.set_index('bref_id', inplace=True)
while True: # because python doesn't have do... while
best_team = best_vorp(data=candidates,
cost_column=salary_col,
value_column=prediction_col,
type_column=position_col,
required_types=positions,
cap=cap,
debug_print_fn=print)
# Implement an additional constraint -- we can't have more than 4 players from the same team.
# We'll actually be a little stricter and try to restrict it at 3 (see MAX_PLAYERS_PER_TEAM).
teams_of_selection = Counter(candidates.loc[best_team, 'Tm'].values)
        most_common_team, count = teams_of_selection.most_common(1)[0]
if count <= MAX_PLAYERS_PER_TEAM:
return candidates.loc[best_team]
else:
# Nope, this is an illegal team. Try to help us generate a real one by dropping the lowest-valued player
# on the team from the list of possible candidates.
players_on_most_common_team = [c for c in best_team if candidates.loc[c, 'Tm'] == most_common_team]
team_players = candidates.loc[players_on_most_common_team].copy()
team_players['value'] = team_players[prediction_col].divide(team_players[salary_col])
            team_players.sort_values('value', inplace=True)
worst_player = team_players.iloc[0].name
print('Ideal team had %d players from %s. Banning player: %s' % (count, most_common_team, worst_player))
candidates = candidates.drop([worst_player]) | c1ac67fe72926e2f97268e753d3d97d9f8169840 | 12,250 |
import traceback
import six
def serialize_remote_exception(failure_info):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = six.text_type(failure.__class__.__name__)
mod_name = six.text_type(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data | 549b996afc2b07b9e72f69cd2c6ddaee4010f5af | 12,251 |
def build_eval_graph(features, model):
"""
builds evaluation graph
"""
_params = {}
logger.debug("building evaluation graph: %s.", _params)
with tf.variable_scope("loss"):
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.expand_dims(features["sentiment"], axis=1),
logits=model["logits"])
model = {"loss": loss, "eval_args": _params}
return model | a63f88d30c817dfc648f6b774acfb78efd6db42c | 12,252 |
import pprint
def in_common(routes):
"""routes is a list of lists, each containing a route to a peer."""
r = []
branch = False
for n in izip_any(*routes): #itertools.izip(*routes):
# strip dead nodes
f = [i for i in n if i != '*']
# ignore all dead nodes
if len(f) == 0:
continue
c = [ (f.count(x), x) for x in f ]
c.sort()
if debug:
pprint(c)
top = c[-1][0]
# majority wins
if top > 2 and top > (len(f) * 0.50):
f = [c[-1][1]]
if len(set(f)) == 1:
r.append(f[0])
else:
# more than one unique node, the tree has branched
branch = True
break
return (branch, r) | 82858ea2abb2e8fd1ffaaefef3d07c9aeb29c61a | 12,253 |
def process_polygon(coordinates):
"""Pass list of co-ordinates to Shapely Polygon function and get polygon object"""
return Polygon(coordinates) | fe644dc41e7951a030df511bb7e76b2e74883cae | 12,254 |
def split_function(vector, column, value):
"""
Split function
"""
return vector[column] >= value | c6129422fd5bf0b16229e6346adde5f50b203e7b | 12,255 |
def blackwell(Sv, theta, phi, r,
r0=10, r1=1000,
tSv=-75, ttheta=702, tphi=282,
wtheta=28 , wphi=52):
"""
    Detects and masks the seabed using the split-beam angle and Sv, based on
"Blackwell et al (2019), Aliased seabed detection in fisheries acoustic
data". Complete article here: https://arxiv.org/abs/1904.10736
Args:
Sv (float): 2D numpy array with Sv data (dB)
theta (float): 2D numpy array with the along-ship angle (degrees)
phi (float): 2D numpy array with the athwart-ship angle (degrees)
r (float): 1D range array (m)
r0 (int): minimum range below which the search will be performed (m)
r1 (int): maximum range above which the search will be performed (m)
tSv (float): Sv threshold above which seabed is pre-selected (dB)
ttheta (int): Theta threshold above which seabed is pre-selected (dB)
tphi (int): Phi threshold above which seabed is pre-selected (dB)
wtheta (int): window's size for mean square operation in Theta field
wphi (int): window's size for mean square operation in Phi field
Returns:
bool: 2D array with seabed mask
"""
# delimit the analysis within user-defined range limits
r0 = np.nanargmin(abs(r - r0))
r1 = np.nanargmin(abs(r - r1)) + 1
Svchunk = Sv[r0:r1, :]
thetachunk = theta[r0:r1, :]
phichunk = phi[r0:r1, :]
# get blur kernels with theta & phi width dimensions
ktheta = np.ones((wtheta, wtheta))/wtheta**2
kphi = np.ones((wphi , wphi ))/wphi **2
# perform mean square convolution and mask if above theta & phi thresholds
thetamaskchunk = convolve2d(thetachunk, ktheta, 'same',
boundary='symm')**2 > ttheta
phimaskchunk = convolve2d(phichunk, kphi, 'same',
boundary='symm')**2 > tphi
anglemaskchunk = thetamaskchunk | phimaskchunk
# if aliased seabed, mask Sv above the Sv median of angle-masked regions
if anglemaskchunk.any():
Svmedian_anglemasked = log(np.nanmedian(lin(Svchunk[anglemaskchunk])))
if np.isnan(Svmedian_anglemasked):
Svmedian_anglemasked = np.inf
if Svmedian_anglemasked < tSv:
Svmedian_anglemasked = tSv
Svmaskchunk = Svchunk > Svmedian_anglemasked
# label connected items in Sv mask
items = nd.label(Svmaskchunk, nd.generate_binary_structure(2,2))[0]
# get items intercepted by angle mask (likely, the seabed)
intercepted = list(set(items[anglemaskchunk]))
if 0 in intercepted:
intercepted.remove(intercepted==0)
# combine angle-intercepted items in a single mask
maskchunk = np.zeros(Svchunk.shape, dtype=bool)
for i in intercepted:
maskchunk = maskchunk | (items==i)
# add data above r0 and below r1 (removed in first step)
above = np.zeros((r0, maskchunk.shape[1]), dtype=bool)
below = np.zeros((len(r) - r1, maskchunk.shape[1]), dtype=bool)
mask = np.r_[above, maskchunk, below]
anglemask = np.r_[above, anglemaskchunk, below] # TODO remove
# return empty mask if aliased-seabed was not detected in Theta & Phi
else:
mask = np.zeros_like(Sv, dtype=bool)
return mask, anglemask | a4a8a925feaabad0002c36bc945aa74a99e30ade | 12,256 |
def _excitation_operator( # pylint: disable=invalid-name
edge_list: np.ndarray, p: int, q: int, h1_pq: float
) -> SparsePauliOp:
"""Map an excitation operator to a Pauli operator.
Args:
edge_list: representation of graph specifying neighboring qubits.
p: First Fermionic-mode index.
q: Second Fermionic-mode index. You must ensure that p < q.
h1_pq: Numerical coefficient of the term.
Returns:
The result of the Fermionic to Pauli operator mapping.
""" # pylint: disable=missing-raises-doc
if p >= q:
raise ValueError("Expected p < q, got p = ", p, ", q = ", q)
b_a = _edge_operator_bi(edge_list, p)
b_b = _edge_operator_bi(edge_list, q)
a_ab = _edge_operator_aij(edge_list, p, q)
return (-1j * 0.5 * h1_pq) * ((b_b & a_ab) + (a_ab & b_a)) | 0ae0ace12884c507977cf2e555a354c80f83e7ad | 12,257 |
def send_message( message, node, username, password, resource, max_attempts=1 ):
""" broadcast this message thru lvalert """
tmpfilename = "tmpfile.json"
tmpfile = open(tmpfilename, "w")
tmpfile.write( message )
tmpfile.close()
cmd = "lvalert_send -a %s -b %s -r %s -n %s -m %d --file %s"%(username, password, resource, node, max_attempts, tmpfilename)
return sp.Popen(cmd.split()).wait() | 5ebec2f3487b431a6d1131b11c8ab2d672308b48 | 12,258 |
def create_bcs(field_to_subspace, Lx, Ly, solutes,
V_boundary,
enable_NS, enable_PF, enable_EC,
**namespace):
""" The boundary conditions are defined in terms of field. """
boundaries = dict(wall=[Wall()])
bcs = dict(
wall=dict()
)
bcs_pointwise = dict()
noslip = Fixed((0., 0.))
# Navier-Stokes
if enable_NS:
bcs["wall"]["u"] = noslip
bcs_pointwise["p"] = (0., "x[0] < DOLFIN_EPS && x[1] < DOLFIN_EPS")
# Electrochemistry
if enable_EC:
bcs["wall"]["V"] = Fixed(V_boundary)
return boundaries, bcs, bcs_pointwise | d1e72d30404ee68b877c6761a6309884e3054b6c | 12,259 |
def get_response_rows(response, template):
"""
    Take in a list of responses and convert them to SSE.Rows based on the column type specified in template
The template should be a list of the form: ["str", "num", "dual", ...]
For string values use: "str"
For numeric values use: "num"
For dual values: "dual"
"""
response_rows = []
# For each row in the response list
for row in response:
i = 0
this_row = []
if len(template) > 1:
# For each column in the row
for col in row:
# Convert values to type SSE.Dual according to the template list
if template[i] == "str":
if col is None:
col = "\x00"
elif type(col) is not str:
col = "{0:.5f}".format(col)
this_row.append(SSE.Dual(strData=col))
elif template[i] == "num":
this_row.append(SSE.Dual(numData=col))
elif template[i] == "dual":
this_row.append(SSE.Dual(strData=col, numData=col))
i = i + 1
else:
# Convert values to type SSE.Dual according to the template list
if template[0] == "str":
if row is None:
row = "\x00"
elif type(row) is not str:
row = "{0:.5f}".format(row)
this_row.append(SSE.Dual(strData=row))
elif template[0] == "num":
this_row.append(SSE.Dual(numData=row))
elif template[0] == "dual":
this_row.append(SSE.Dual(strData=row, numData=row))
# Group columns into a iterable and add to the the response_rows
response_rows.append(iter(this_row))
# Values are then structured as SSE.Rows
response_rows = [SSE.Row(duals=duals) for duals in response_rows]
return response_rows | c3c3e4bf53929895959948836a77893b2f961221 | 12,260 |
def stations_within_radius(stations, centre, r):
"""function that returns a list of all stations (type MonitoringStation)
within radius r of a geographic coordinate x."""
close_stations = []
for station in stations:
if haversine(station.coord, centre) < float(r):
close_stations.append(station)
return close_stations | 9877020f56f25435d1dad6fae32ebbae0e7cfdaf | 12,261 |
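`haversine` is not defined in this snippet; a minimal sketch of a great-circle distance in kilometres, assuming station coordinates are (latitude, longitude) tuples in degrees and that r is given in kilometres:

```python
from math import radians, sin, cos, asin, sqrt

def haversine(coord1, coord2):
    """Hypothetical helper: great-circle distance in km between two (lat, lon) points."""
    lat1, lon1 = map(radians, coord1)
    lat2, lon2 = map(radians, coord2)
    dlat, dlon = lat2 - lat1, lon2 - lon1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 2 * 6371 * asin(sqrt(a))  # Earth radius ~6371 km
```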
import os
import json
def get_pack_display_name(pack_id: str) -> str:
"""
Gets the display name of the pack from the pack ID.
:param pack_id: ID of the pack.
:return: Name found in the pack metadata, otherwise an empty string.
"""
metadata_path = os.path.join(PACKS_FULL_PATH, pack_id, PACK_METADATA_FILE)
if pack_id and os.path.isfile(metadata_path):
with open(metadata_path, 'r') as json_file:
pack_metadata = json.load(json_file)
return pack_metadata.get('name')
return '' | feac51303f2a2b99dd2890cfc9fade74b45324db | 12,262 |
import os
from tqdm import tqdm
def tflite_conversion(model, tflite_path, conversion_type="fp32"):
"""Performs tflite conversion (fp32, int8)."""
# Prepare model for inference
model = prepare_model_for_inference(model)
create_directories([os.path.dirname(tflite_path)])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
def representative_dataset_gen(input_dim):
calib_data = []
for data in tqdm(training_data.take(1000), desc="model calibration"):
input_data = data[0]
for i in range(input_data.shape[1] // input_dim):
input_chunks = [
input_data[:, i * input_dim: (i + 1) * input_dim, :, ]
]
for chunk in input_chunks:
calib_data.append([chunk])
return lambda: [
(yield data) for data in tqdm(calib_data, desc="model calibration")
]
if conversion_type == "int8":
log("Quantizing Model")
(training_data, training_num_steps) = get_data("train", overlap=True)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
converter.representative_dataset = representative_dataset_gen(model.input_shape[1])
tflite_model = converter.convert()
open(tflite_path, "wb").write(tflite_model) | 9764243cfd037424a19bda35c2d20dff472851cf | 12,263 |
import torch
def jacobian(model, x, output_class):
"""
Compute the output_class'th row of a Jacobian matrix. In other words,
compute the gradient wrt to the output_class.
:param model: forward pass function.
:param x: input tensor.
    :param output_class: the output class whose gradients we want to compute.
:return: output_class'th row of the Jacobian matrix wrt x.
"""
xvar = replicate_input_withgrad(x)
scores = model(xvar)
# compute gradients for the class output_class wrt the input x
# using backpropagation
torch.sum(scores[:, output_class]).backward()
return xvar.grad.detach().clone() | 07364b8cc58d3ba51431d07e6bf5d164da7ab380 | 12,264 |
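`replicate_input_withgrad` is not shown here; it is assumed to return a detached copy of the input with gradient tracking enabled, roughly:

```python
import torch

def replicate_input_withgrad(x: torch.Tensor) -> torch.Tensor:
    """Hypothetical helper: detached clone of x that requires grad."""
    return x.detach().clone().requires_grad_()
```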
def render_face_orthographic(mesh, background=None):
"""
mesh location should be normalized
:param mesh:
:param background:
:return:
"""
mesh.visual.face_colors = np.array([0.05, 0.1, 0.2, 1])
mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
# mesh = pyrender.Mesh.from_trimesh(mesh)
scene.add(mesh, pose=np.eye(4))
camera_pose = np.eye(4)
# camera_pose[0, 3] = 1
# camera_pose[1, 3] = 1
# camera_pose[2, 3] = -10
# camera_pose[0, 0] = 1
# camera_pose[1, 1] = -1
# camera_pose[2, 2] = -1
#
# camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
camera_pose[0, 3] = 1
camera_pose[1, 3] = 1
camera_pose[2, 3] = 10
camera_pose[0, 0] = 1
camera_pose[1, 1] = 1
camera_pose[2, 2] = 1
camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
scene.add(camera, pose=camera_pose)
light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=5.0)
scene.add(light, pose=camera_pose)
color, depth = r.render(scene)
scene.clear()
# print(color.shape)
color = np.array(color)
color = color[::-1]
if background is not None:
new_color = np.array(background)
new_color[color != 255] = color[color != 255]
color = new_color
return color | 21cc7adaaebec7d8114f8192ccdc133075a19bc3 | 12,265 |
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of ``phrase`` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase
String or list of strings to be returned as a string.
symbols
Iterable of characters allowed in ``phrase``.
If ``symbols`` is ``None``, no checking is performed.
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv | 64ddcedf19ecba17b169e4bc3a3c205fe3192eb7 | 12,266 |
from typing import Optional
from typing import Set
def rigs_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None) -> kapture.Rigs:
"""
Reads rigs from CSV file.
:param filepath: input file path
:param sensor_ids: input set of valid sensor ids.
If a rig id collides one of them, raise error.
If a sensor in rig is not in sensor_ids, it is ignored.
:return: rigs
"""
# rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz
rigs = kapture.Rigs()
with open(filepath) as file:
table = table_from_file(file)
for rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz in table:
if sensor_ids is not None and rig_id in sensor_ids:
raise ValueError(f'collision between a sensor ID and rig ID ({rig_id})')
rotation = float_array_or_none([qw, qx, qy, qz])
translation = float_array_or_none([tx, ty, tz])
pose = kapture.PoseTransform(rotation, translation)
rigs[str(rig_id), sensor_id] = pose
if sensor_ids is not None:
# expunge all undesired sensors
rig_ids = set(rigs)
for rig_id in rig_ids:
for sensor_id in set(rigs[rig_id]):
if sensor_id not in sensor_ids and sensor_id not in rig_ids:
logger.debug(f'dropping sensor {sensor_id} from rig {rig_id} because it is unknown sensor.')
del rigs[rig_id][sensor_id]
return rigs | c7fffcf28df54fb345b2638c02d79f40256cbb78 | 12,267 |
def drawLaneOnImage(img):
"""
Find and draw the lane lines on the image `img`.
"""
left_fit, right_fit, left_fit_m, right_fit_m, _, _, _, _, _ = findLines(img)
output = drawLine(img, left_fit, right_fit)
return cv2.cvtColor( output, cv2.COLOR_BGR2RGB ) | aa42679c7ff8f90b906d8cf74af9207edeadc430 | 12,268 |
def enthalpy_diff(SA, CT, p_shallow, p_deep):
"""
Calculates the difference of the specific enthalpy of seawater between
two different pressures, p_deep (the deeper pressure) and p_shallow (the
shallower pressure), at the same values of SA and CT. This function uses
the computationally-efficient 48-term expression for density in terms of
SA, CT and p (McDougall et al., 2011). The output (enthalpy_diff_CT) is
the specific enthalpy evaluated at (SA, CT, p_deep) minus the specific
enthalpy at (SA, CT, p_shallow).
Parameters
----------
SA : array_like
Absolute Salinity [g/kg]
CT : array_like
Conservative Temperature [:math:`^\circ` C (ITS-90)]
p_shallow : array_like
lower sea pressure [dbar]
p_deep : array_like
upper sea pressure [dbar]
Returns
-------
enthalpy_diff : array_like
difference of specific enthalpy [J/kg]
(deep minus shallow)
Notes
-----
The 48-term equation has been fitted in a restricted range of parameter
space, and is most accurate inside the "oceanographic funnel" described in
McDougall et al. (2011). The GSW library function "infunnel(SA, CT, p)" is
available to be used if one wants to test if some of one's data lies
outside this "funnel".
Examples
--------
TODO
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqns. (3.32.2) and (A.30.6).
.. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011: A
computationally efficient 48-term expression for the density of
seawater in terms of Conservative Temperature, and related properties
of seawater.
"""
SA = np.maximum(SA, 0)
sqrtSA = np.sqrt(SA)
a0 = (v21 + CT * (v22 + CT * (v23 + CT * (v24 + v25 * CT))) + SA *
(v26 + CT * (v27 + CT * (v28 + CT * (v29 + v30 * CT))) + v36 * SA +
sqrtSA * (v31 + CT * (v32 + CT * (v33 + CT * (v34 + v35 * CT))))))
a1 = v37 + CT * (v38 + CT * (v39 + v40 * CT)) + SA * (v41 + v42 * CT)
a2 = v43 + CT * (v44 + v45 * CT + v46 * SA)
a3 = v47 + v48 * CT
b0 = (v01 + CT * (v02 + CT * (v03 + v04 * CT)) + SA * (v05 + CT * (v06 +
v07 * CT) + sqrtSA * (v08 + CT * (v09 + CT * (v10 + v11 * CT)))))
b1 = 0.5 * (v12 + CT * (v13 + v14 * CT) + SA * (v15 + v16 * CT))
b2 = v17 + CT * (v18 + v19 * CT) + v20 * SA
b1sq = b1 * b1
sqrt_disc = np.sqrt(b1sq - b0 * b2)
N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2
M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2
A = b1 - sqrt_disc
B = b1 + sqrt_disc
delta_p = p_deep - p_shallow
p_sum = p_deep + p_shallow
part1 = b0 + p_shallow * (2 * b1 + b2 * p_shallow)
part2 = (B + b2 * p_deep) * (A + b2 * p_shallow)
part3 = (N * b2 - M * b1) / (b2 * (B - A))
# This function calculates enthalpy_diff using the computationally
# efficient 48-term expression for density in terms of SA, CT and p. If one
# wanted to compute the enthalpy difference using the full TEOS-10 Gibbs
# function, the following lines of code will enable this.
# pt = pt_from_CT(SA, CT)
# t_shallow = pt_from_t(SA, pt, 0, p_shallow)
# t_deep = pt_from_t(SA, pt, 0, p_deep)
# enthalpy_diff = (enthalpy_t_exact(SA, t_deep, p_deep) -
# enthalpy_t_exact(SA, t_shallow, p_shallow))
# or call the following, it is identical to the lines above.
# enthalpy_diff = enthalpy_diff_CT_exact(SA, CT, p_shallow, p_deep)
return db2Pascal * (delta_p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p_sum) /
b2 + (M / (2 * b2)) *
np.log(1 + delta_p * (2 * b1 + b2 * p_sum) / part1) +
part3 * np.log(1 + delta_p * b2 * (B - A) / part2)) | 4a7251f41b073127cf1a2882614986987f085c30 | 12,269 |
import numpy
def ss(a, axis=0):
### taken from SciPy
"""Squares each value in the passed array, adds these squares, and
returns the result.
Parameters
----------
a : array
axis : int or None
Returns
-------
The sum along the given axis for (a*a).
"""
a, axis = _chk_asarray(a, axis)
return numpy.sum(a*a, axis) | 75569fa96fd866f20b57c5f5c52a38e1b9663968 | 12,270 |
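# Quick check of the sum-of-squares helper, assuming _chk_asarray behaves like SciPy's
# (coerce to an ndarray and pass the axis through).
print(ss(numpy.array([1, 2, 3])))            # 14
print(ss(numpy.array([[1, 2], [3, 4]]), 0))  # [10 20]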
def Journaling_TypeInfo():
"""Journaling_TypeInfo() -> RTTI"""
return _DataModel.Journaling_TypeInfo() | 24cdb273b6463874e71668a304af3a13305fff24 | 12,271 |
def _maven_artifact(
group,
artifact,
version,
ownership_tag = None,
packaging = None,
classifier = None,
exclusions = None,
neverlink = None,
testonly = None,
tags = None,
flatten_transitive_deps = None,
aliases = None):
"""Defines maven artifact by coordinates.
Args:
group: The Maven artifact coordinate group name (ex: "com.google.guava").
artifact: The Maven artifact coordinate artifact name (ex: "guava").
version: The Maven artifact coordinate version name (ex: "1.20.1").
ownership_tag: 3rd party dependency owner responsible for its maintenance.
        packaging: The Maven artifact coordinate packaging name (ex: "jar").
classifier: The Maven artifact coordinate classifier name (ex: "jdk11").
exclusions: Artifact dependencies to be excluded from resolution closure.
neverlink: neverlink value to set,
testonly: testonly value to set.
tags: Target tags.
flatten_transitive_deps: Define all transitive deps as direct deps.
aliases: aliases that will point to this dep.
"""
maven_artifact = {}
maven_artifact["group"] = group
maven_artifact["artifact"] = artifact
maven_artifact["version"] = version
maven_artifact["aliases"] = aliases
maven_artifact["tags"] = tags
maven_artifact["flatten_transitive_deps"] = flatten_transitive_deps
if packaging != None:
maven_artifact["packaging"] = packaging
if classifier != None:
maven_artifact["classifier"] = classifier
if exclusions != None:
maven_artifact["exclusions"] = exclusions
if neverlink != None:
maven_artifact["neverlink"] = neverlink
if testonly != None:
maven_artifact["testonly"] = testonly
if ownership_tag != None:
maven_artifact["ownership_tag"] = ownership_tag
return maven_artifact | 9f97cd8cadfc3ad1365cb6d291634a9362fea4e8 | 12,272 |
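# Example of the coordinate dict this helper builds; it is plain Python/Starlark with
# no external calls, so it can be exercised directly (the coordinates are illustrative).
guava = _maven_artifact(
    group = "com.google.guava",
    artifact = "guava",
    version = "31.1-jre",
    exclusions = ["com.google.code.findbugs:jsr305"],
)
# guava["group"] == "com.google.guava"; optional keys such as "packaging" or
# "classifier" only appear when they are explicitly passed.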
import sys
def docmdnf(cmd):
"""Execute a command."""
if flag_echo:
sys.stderr.write("executing: " + cmd + "\n")
if flag_dryrun:
return 0
return u.docmdnf(cmd) | c21d06c34a913d62995447392dd62d2524b1253e | 12,273 |
import numpy as np
def get_anchors(n):
"""Get a list of NumPy arrays, each of them is an anchor node set"""
m = int(np.log2(n))
anchor_set_id = []
for i in range(m):
anchor_size = int(n / np.exp2(i + 1))
for _ in range(m):
anchor_set_id.append(np.random.choice(n, size=anchor_size, replace=False))
return anchor_set_id | 4adbaa291740ab3d9cb0a3d6b48c39665d8d5b06 | 12,274 |
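# Small usage sketch (P-GNN style anchor sets): for n = 16 nodes this produces
# log2(16)**2 = 16 anchor sets whose sizes halve from n/2 down to 1.
np.random.seed(0)
anchor_sets = get_anchors(16)
print(len(anchor_sets))                # 16
print([len(s) for s in anchor_sets])   # [8, 8, 8, 8, 4, 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1]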
import numpy as np
import tensorflow as tf
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
"""Log-likelihood under a Gaussian distribution with diagonal covariance.
Returns the log-likelihood for each dimension. One should sum the
results for the log-likelihood under the full multidimensional model.
Args:
z: The value to compute the log-likelihood.
mu: The mean of the Gaussian
logvar: The log variance of the Gaussian.
Returns:
The log-likelihood under the Gaussian model.
"""
return -0.5 * (logvar + np.log(2*np.pi) + \
tf.square((z-mu)/tf.exp(0.5*logvar))) | 7265103ddfc5c521fd9612524413cd15e237be9b | 12,275 |
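# Sanity check of the same log-density formula in plain NumPy/SciPy (the function
# above mixes np and tf, so this only re-states the math it encodes).
import numpy as np
from scipy.stats import norm

z, mu, logvar = 0.3, 0.1, -1.0
sigma = np.exp(0.5 * logvar)
manual = -0.5 * (logvar + np.log(2 * np.pi) + ((z - mu) / sigma) ** 2)
assert np.isclose(manual, norm.logpdf(z, loc=mu, scale=sigma))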
def _filter_option_to_config_setting(flt, setting):
"""
Encapsulates the logic for associating a filter database option with the filter setting from relay_config
:param flt: the filter
:param setting: the option deserialized from the database
:return: the option as viewed from relay_config
"""
if setting is None:
raise ValueError("Could not find filter state for filter {0}."
" You need to register default filter state in projectoptions.defaults.".format(flt.spec.id))
is_enabled = setting != '0'
ret_val = {
'is_enabled': is_enabled
}
# special case for legacy browser.
# If the number of special cases increases we'll have to factor this functionality somewhere
if flt.spec.id == FilterStatKeys.LEGACY_BROWSER:
if is_enabled:
if setting == '1':
# old style filter
ret_val['default_filter'] = True
else:
# new style filter, per legacy browser type handling
# ret_val['options'] = setting.split(' ')
ret_val['options'] = list(setting)
return ret_val | 41694d340285b722daf91fb0badeb1ec33eb0587 | 12,276 |
def get_svg(accession, **kwargs):
"""
Returns a HMM sequence logo in SVG format.
Parameters
----------
accession : str
Pfam accession for desired HMM.
**kwargs :
Additional arguments are passed to :class:`LogoPlot`.
"""
logoplot = plot.LogoPlot(accession, **kwargs)
svg = logoplot.get_svg()
return svg | 952c6afa4d63f46be579cd70a4e2756b061b9f9b | 12,277 |
def wl_to_en( l ):
"""
Converts a wavelength, given in nm, to an energy in eV.
:param l: The wavelength to convert, in nm.
:returns: The corresponding energy in eV.
"""
    a = phys.physical_constants['electron volt-joule relationship'][0]  # J
    return phys.Planck * phys.c / (a * l * 1e-9)
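# Worked check, assuming `phys` is scipy.constants: E[eV] = h*c / (e * lambda),
# roughly 1239.84 eV*nm / lambda[nm], so a 620 nm photon carries about 2.0 eV.
print(wl_to_en(620.0))      # ~2.0 eV
print(wl_to_en(1239.84))    # ~1.0 eV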
from typing import Optional
def get_local_address_reaching(dest_ip: IPv4Address) -> Optional[IPv4Address]:
"""Get address of a local interface within same subnet as provided address."""
for iface in netifaces.interfaces():
for addr in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
iface = IPv4Interface(addr["addr"] + "/" + addr["netmask"])
if dest_ip in iface.network:
return iface.ip
return None | ee7061633d72c3b0ac578baf6119e5437395ce17 | 12,279 |
def atSendCmdTest(cmd_name: 'str', params: 'list'):
""" 发送测试命令,方便调试 ATCore
"""
func_name = 'atSendCmdTest'
atserial.ATraderCmdTest_send(cmd_name, params)
res = recv_serial(func_name)
atReturnChecker(func_name, res.result)
return res.listResult | 4cbe127ea7291893d64d8554c5a405c24085320a | 12,280 |
def unlabeled_balls_in_unlabeled_boxes(balls, box_sizes):
"""
OVERVIEW
This function returns a generator that produces all distinct distributions of
indistinguishable balls among indistinguishable boxes, with specified box
sizes (capacities). This is a generalization of the most common formulation
of the problem, where each box is sufficiently large to accommodate all of
the balls. It might be asked, 'In what sense are the boxes indistinguishable
if they have different capacities?' The answer is that the box capacities
must be considered when distributing the balls, but once the balls have been
distributed, the identities of the boxes no longer matter.
CONSTRUCTOR INPUTS
    balls: the number of balls
    box_sizes: This argument is a list of length 1 or greater. The length of
      the list corresponds to the number of boxes. `box_sizes[i]` is a positive
      integer that specifies the maximum capacity of the ith box. If
      `box_sizes[i]` equals `balls` (or greater), the ith box can accommodate
      all the balls and thus effectively has unlimited capacity.
NOTE
For `unlabeled_balls_in_unlabeled_boxes`, the order of the elements of the
`box_sizes` list is unimportant because the code will sort it into non-
increasing order before any other processing is done.
"""
if not isinstance(balls, int):
raise TypeError("balls must be a non-negative integer.")
if balls < 0:
raise ValueError("balls must be a non-negative integer.")
if not isinstance(box_sizes, (list, tuple)):
raise ValueError("box_sizes must be a non-empty list or tuple.")
    capacity = 0
for size in box_sizes:
if not isinstance(size, int):
raise TypeError("box_sizes must contain only positive integers.")
if size < 1:
raise ValueError("box_sizes must contain only positive integers.")
        capacity += size
if capacity < balls:
raise ValueError("The total capacity of the boxes is less than the "
"number of balls to be distributed.")
# Sort the box sizes so that the values decrease:
    box_sizes = sorted(box_sizes, reverse=True)
return _unlabeled_balls_in_unlabeled_boxes(balls, box_sizes) | 13743a7207f1d4fd2635f35c9eaef0a9acf53fa0 | 12,281 |
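# Hypothetical usage (the actual generator lives in the private helper
# _unlabeled_balls_in_unlabeled_boxes, which is not shown here): distributing 3
# indistinguishable balls over two boxes of capacity 2 should leave only the
# partition (2, 1) once box identity is ignored.
for distribution in unlabeled_balls_in_unlabeled_boxes(3, [2, 2]):
    print(distribution)    # expected: (2, 1)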
def get_version():
"""Extract current version from __init__.py."""
with open("morphocell/__init__.py", encoding="utf-8") as fid:
for line in fid:
if line.startswith("__version__"):
VERSION = line.strip().split()[-1][1:-1]
break
return VERSION | 69a00c2e5544dfd8d86cdab8be53c17b73764aca | 12,282 |
def get_neighbor_v4_by_id(obj_id):
"""Return an NeighborV4 by id.
Args:
obj_id: Id of NeighborV4
"""
try:
obj = NeighborV4.get_by_pk(id=obj_id)
except NeighborV4NotFoundError as e:
raise NeighborV4DoesNotExistException(str(e))
return obj | 30b00e2fb1f954299a331ce6b198f1e5465122e5 | 12,283 |
from typing import Dict
def get_resources_json_obj(resource_name: str) -> Dict:
"""
Get a JSON object of a specified resource.
:param resource_name: The name of the resource.
:returns: The JSON object (in the form of a dictionary).
:raises Exception: An exception is raised if the specified resources does
not exist.
"""
resource_map = _get_resources(_get_resources_json()["resources"])
if resource_name not in resource_map:
raise Exception(
"Error: Resource with name '{}' does not exist".format(
resource_name
)
)
return resource_map[resource_name] | ce625ecbaad0ec4cea93da78c9c213e37ffef3ed | 12,284 |
def skip_if(predicate, reason=None):
"""Skip a test if predicate is true."""
reason = reason or predicate.__name__
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if predicate():
msg = "'%s' skipped: %s" % (fn_name, reason)
raise SkipTest(msg)
else:
return fn(*args, **kw)
return update_wrapper(maybe, fn)
return decorate | 56089515f8cae4f977b1eac96a8de6c6ee59e711 | 12,285 |
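# Typical decorator usage; SkipTest and update_wrapper are assumed to come from the
# enclosing test framework (e.g. nose or unittest) since they are not imported above.
import sys

def _on_windows():
    return sys.platform.startswith("win")

@skip_if(_on_windows, "symlinks are unreliable on Windows")
def test_symlink_roundtrip():
    ...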
import numpy as np
import pytest
from pyrap.measures import measures
from pyrap.quanta import quantity as q
def synthesized_uvw(ants, time, phase_dir, auto_correlations):
"""
Synthesizes new UVW coordinates based on time according to
NRAO CASA convention (same as in fixvis)
User should check these UVW coordinates carefully:
if time centroid was used to compute
original uvw coordinates the centroids
of these new coordinates may be wrong, depending on whether
data timesteps were heavily flagged.
"""
pytest.importorskip('pyrap')
dm = measures()
epoch = dm.epoch("UT1", q(time[0], "s"))
ref_dir = dm.direction("j2000",
q(phase_dir[0], "rad"),
q(phase_dir[1], "rad"))
ox, oy, oz = ants[0]
obs = dm.position("ITRF", q(ox, "m"), q(oy, "m"), q(oz, "m"))
# Setup local horizon coordinate frame with antenna 0 as reference position
dm.do_frame(obs)
dm.do_frame(ref_dir)
dm.do_frame(epoch)
ant1, ant2 = np.triu_indices(ants.shape[0],
0 if auto_correlations else 1)
ant1 = ant1.astype(np.int32)
ant2 = ant2.astype(np.int32)
ntime = time.shape[0]
nbl = ant1.shape[0]
rows = ntime * nbl
uvw = np.empty((rows, 3), dtype=np.float64)
# For each timestep
for ti, t in enumerate(time):
epoch = dm.epoch("UT1", q(t, "s"))
dm.do_frame(epoch)
ant_uvw = np.zeros_like(ants)
# Calculate antenna UVW positions
for ai, (x, y, z) in enumerate(ants):
bl = dm.baseline("ITRF",
q([x, ox], "m"),
q([y, oy], "m"),
q([z, oz], "m"))
ant_uvw[ai] = dm.to_uvw(bl)["xyz"].get_value()[0:3]
# Now calculate baseline UVW positions
# noting that ant1 - ant2 is the CASA convention
base = ti*nbl
uvw[base:base + nbl, :] = ant_uvw[ant1] - ant_uvw[ant2]
return ant1, ant2, uvw | f3261545f85981d353a05acf3176bf0317ea4c86 | 12,286 |
from datetime import datetime
def execute_pso_strategy(df, options, topology, retrain_params, commission, data_name, s_test, e_test, iters=100, normalization='exponential'):
"""
Execute particle swarm optimization strategy on data history contained in df
:param df: dataframe with historical data
:param options: dict with the following parameters
- c1 - cognitive parameter with which the particle follows its personal best
- c2 - social parameter with which the particle follows the swarm's global best position
- w - parameter that controls the inertia of the swarm's movement
    :param commission: commission to be paid on each operation
:param data_name: quote data name
    :param s_test: start date of the test simulation
    :param e_test: end date of the test simulation
:return:
- PSO_Cerebro - execution engine
- PSO_Strategy - pso strategy instance
"""
    print_execution_name("Estrategia: particle swarm optimization")
strategy_name = 'particle_swarm_optimization'
info = {
'Mercado': data_name,
'Estrategia': strategy_name,
'Fecha inicial': s_test,
'Fecha final': e_test
}
    # ------------ Build the train and test sets ------------ #
s_test_date = datetime.strptime(s_test, '%Y-%m-%d')
s_train = s_test_date.replace(year = s_test_date.year - 2)
#s_train = s_test_date - timedelta(days=180)
e_train = s_test_date - timedelta(days=1)
gen_representation = GeneticRepresentation(df, s_train, e_train, s_test, e_test)
    # ------------ Set the hyperparameters ------------ #
n_particles = topology['particles']
num_neighbours = topology['neighbours']
minkowski_p_norm = 2
options['k'] = num_neighbours
options['p'] = minkowski_p_norm
dimensions=len(gen_representation.moving_average_rules)+2
if normalization == 'exponential':
max_bound = 2.0 * np.ones(dimensions-2)
min_bound = -max_bound
elif normalization == 'l1':
max_bound = 2.0 * np.ones(dimensions-2)
min_bound = np.zeros(dimensions-2)
max_bound = np.append(max_bound, [0.9, 0.0])
min_bound = np.append(min_bound, [0.0, -0.9])
bounds = (min_bound, max_bound)
# Call instance of PSO
optimizer = ps.single.LocalBestPSO(n_particles=n_particles,
dimensions=dimensions,
options=options,
bounds=bounds,
static=True)
# Perform optimization
kwargs={'from_date': s_train, 'to_date': e_train, 'normalization': normalization}
best_cost, best_pos = optimizer.optimize(gen_representation.cost_function,
iters=iters,
n_processes=2,
**kwargs)
# Create an instance from CombinedSignalStrategy class and assign parameters
PSO_Strategy = CombinedSignalStrategy
w, buy_threshold, sell_threshold = get_split_w_threshold(best_pos)
"""
print("Umbral de compra: ", buy_threshold)
print("Umbral de venta: ", sell_threshold)
crosses = ["(" + str(cross[0]) + ", " + str(cross[1]) + ")" for cross in gen_representation.moving_average_rules]
y_pos = np.arange(len(crosses))
plt.bar(y_pos, w)
plt.xticks(y_pos, crosses)
plt.xticks(rotation='vertical')
plt.subplots_adjust(top=0.98, bottom=0.2, left=0.08, right=0.98, hspace=0.0, wspace=0.0)
plt.show()
"""
PSO_Strategy.w = w
PSO_Strategy.buy_threshold = buy_threshold
PSO_Strategy.sell_threshold = sell_threshold
PSO_Strategy.moving_average_rules = gen_representation.moving_average_rules
PSO_Strategy.moving_averages = gen_representation.moving_averages_test
PSO_Strategy.optimizer = optimizer
PSO_Strategy.gen_representation = gen_representation
PSO_Strategy.normalization = normalization
PSO_Strategy.retrain_params = retrain_params
df_test = gen_representation.df_test
df_train = gen_representation.df_train
PSO_Cerebro = execute_strategy(PSO_Strategy, df_test, commission, info, retrain_params)
return PSO_Cerebro, PSO_Strategy | a9a4fffe335ab34ca584a4fe1d3b6116a2a7866c | 12,287 |
def env_get(d, key, default, decoders=decoders, required=None):
"""
Look up ``key`` in ``d`` and decode it, or return ``default``.
"""
if required is None:
required = isinstance(default, type)
try:
value = d[key]
except KeyError:
if required:
raise
return default
dt = (default if default is None or isinstance(default, type)
else type(default))
for decoder in decoders:
if (decoder.decodes_to_type(dt) and
decoder.decodes_from_value(value)
):
try:
return decoder.decode(value)
except Exception as e:
logger.error("%s couldn't convert %s: %s: %s",
decoder.__class__.__name__, key,
e.__class__.__name__, e)
raise
raise ValueError("no suitable env decoder for {}".format(key)) | 844c6ac9931af97bdc92da6d5014659c3600d50e | 12,288 |
from sympy.functions.elementary.complexes import re, im
from .add import Add
from .singleton import S
def get_integer_part(expr: 'Expr', no: int, options: OPT_DICT, return_ints=False) -> \
tUnion[TMP_RES, tTuple[int, int]]:
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
# The expression is likely less than 2^30 or so
assumed_size = 30
result = evalf(expr, assumed_size, options)
if result is S.ComplexInfinity:
raise ValueError("Cannot get integer part of Complex Infinity")
ire, iim, ire_acc, iim_acc = result
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire) - ire_acc, fastlog(iim) - iim_acc)
elif ire:
gap = fastlog(ire) - ire_acc
elif iim:
gap = fastlog(iim) - iim_acc
else:
# ... or maybe the expression was exactly zero
if return_ints:
return 0, 0
else:
return None, None, None, None
margin = 10
if gap >= -margin:
prec = margin + assumed_size + gap
ire, iim, ire_acc, iim_acc = evalf(
expr, prec, options)
else:
prec = assumed_size
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close).
def calc_part(re_im: 'Expr', nexpr: MPF_TUP):
_, _, exponent, _ = nexpr
is_int = exponent == 0
nint = int(to_int(nexpr, rnd))
if is_int:
# make sure that we had enough precision to distinguish
# between nint and the re or im part (re_im) of expr that
# was passed to calc_part
ire, iim, ire_acc, iim_acc = evalf(
re_im - nint, 10, options) # don't need much precision
assert not iim
size = -fastlog(ire) + 2 # -ve b/c ire is less than 1
if size > prec:
ire, iim, ire_acc, iim_acc = evalf(
re_im, size, options)
assert not iim
nexpr = ire
nint = int(to_int(nexpr, rnd))
_, _, new_exp, _ = ire
is_int = new_exp == 0
if not is_int:
# if there are subs and they all contain integer re/im parts
# then we can (hopefully) safely substitute them into the
# expression
s = options.get('subs', False)
if s:
doit = True
# use strict=False with as_int because we take
# 2.0 == 2
for v in s.values():
try:
as_int(v, strict=False)
except ValueError:
try:
[as_int(i, strict=False) for i in v.as_real_imag()]
continue
except (ValueError, AttributeError):
doit = False
break
if doit:
re_im = re_im.subs(s)
re_im = Add(re_im, -nint, evaluate=False)
x, _, x_acc, _ = evalf(re_im, 10, options)
try:
check_target(re_im, (x, None, x_acc, None), 3)
except PrecisionExhausted:
if not re_im.equals(0):
raise PrecisionExhausted
x = fzero
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, INF
re_, im_, re_acc, im_acc = None, None, None, None
if ire:
re_, re_acc = calc_part(re(expr, evaluate=False), ire)
if iim:
im_, im_acc = calc_part(im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re_ or fzero)), int(to_int(im_ or fzero))
return re_, im_, re_acc, im_acc | 6e00897786581134480a6d7fd16b559760a1a4e7 | 12,289 |
def inv_last_roundf(ns):
"""
ns -> States of nibbles
Predict the states of nibbles after passing through the inverse last round
of SomeCipher. Refer to `last_roundf()` for more details.
"""
return inv_shift_row(ns) | 12561f9815ecad4cd08909be1e0c77dd61500cce | 12,290 |
def get_screen(name, layer=None):
"""
:doc: screens
Returns the ScreenDisplayable with the given `name` on layer. `name`
is first interpreted as a tag name, and then a screen name. If the
screen is not showing, returns None.
This can also take a list of names, in which case the first screen
that is showing is returned.
This function can be used to check if a screen is showing::
if renpy.get_screen("say"):
text "The say screen is showing."
else:
text "The say screen is hidden."
"""
if layer is None:
layer = get_screen_layer(name)
if isinstance(name, basestring):
name = (name,)
sl = renpy.exports.scene_lists()
for tag in name:
sd = sl.get_displayable_by_tag(layer, tag)
if sd is not None:
return sd
for tag in name:
sd = sl.get_displayable_by_name(layer, (tag,))
if sd is not None:
return sd
return None | a6496e453a80f1ad286bbe5201aba92fa922794b | 12,291 |
import random
import csv
def generate_address_full(chance=None, variation=False, format=1):
"""
Function to generate the full address of the profile.
Args:
chance: Integer between 1-100 used for realistic variation. (not required)
variation: Boolean value indicating whether variation is requested. (optional)
format: String value used to indicate required format. (optional)
Options include:
-1 (Str value)
-2 (List value)
Returns:
The return value. String/List value containing the full address.
"""
if not chance:
chance = random.randint(1,100)
csv_file = open(canadian_data_file_name, 'r')
csv_reader = csv.reader(csv_file, delimiter=',')
random_row = random.choice(list(csv_reader))
csv_file.close()
if format == 1 or format == "1":
return "%s %s, %s, %s, %s" % (generate_street_number(row=random_row),generate_street_name(chance=chance, variation=variation,row=random_row),generate_city(chance=chance, variation=variation,row=random_row),generate_province(chance=chance, variation=variation,row=random_row),generate_postal_code(chance=chance, variation=variation,row=random_row))
elif format == 2 or format == "2":
address_list=[]
address_list.append(generate_street_number(row=random_row))
        address_list.append(generate_street_name(chance=chance, variation=variation, row=random_row))
        address_list.append(generate_city(chance=chance, variation=variation, row=random_row))
        address_list.append(generate_province(chance=chance, variation=variation, row=random_row))
        address_list.append(generate_postal_code(chance=chance, variation=variation, row=random_row))
return address_list | 364cbe0f033d0500014583c550beb36a6cb6db55 | 12,292 |
import urllib
def binder_url(repo, branch="master", filepath=None):
"""
Build a binder url. If filepath is provided, the url will be for
the specific file.
Parameters
----------
repo: str
The repository in the form "username/reponame"
branch: str, optional
The branch, default "master"
filepath: str, optional
The path to a file in the repo, e.g. dir1/dir2/notebook.ipynb
Returns
-------
str
A binder url that will launch a notebook server
"""
if filepath is not None:
fpath = urllib.parse.quote(filepath, safe="%")
return resources.BINDER_URL_TEMPLATE_WITH_FILEPATH.format(
repo, branch, fpath
)
else:
return resources.BINDER_URL_TEMPLATE_NO_FILEPATH.format(repo, branch) | f9ac9e28a1cc6b88bce788e63668f1e4a4b45f61 | 12,293 |
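# Hypothetical usage; the exact text depends on the templates defined in `resources`,
# but for mybinder.org they conventionally expand to something like
# https://mybinder.org/v2/gh/<repo>/<branch>?filepath=<path>.
url = binder_url("someuser/somerepo", branch="main",
                 filepath="notebooks/index.ipynb")
print(url)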
def _create_group_hub_without_avatar(_khoros_object, _api_url, _payload):
"""This function creates a group hub with only a JSON payload and no avatar image.
.. versionadded:: 2.6.0
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _api_url: The API URL to utilize in the API request
:type _api_url: str
:param _payload: The JSON payload to be used in the API request
:type _payload: dict
:returns: The API response from the POST request
:raises: :py:exc:`khoros.errors.exceptions.APIConnectionError`,
:py:exc:`khoros.errors.exceptions.POSTRequestError`
"""
_headers = {'content-type': 'application/json'}
_response = api.post_request_with_retries(_api_url, _payload, khoros_object=_khoros_object, headers=_headers)
return _response | 43222666b5a5f5dcec91a4fa1025278f84275e9c | 12,294 |
def ulstrip(text):
"""
Strip Unicode extended whitespace from the left side of a string
"""
return text.lstrip(unicode_extended_whitespace) | 191e0654cdab79778c64ec874bbc9b945b0ed4a3 | 12,295 |
def clone_dcm_meta(dcm):
"""
Copy an existing pydicom Dataset as a basis for saving
another image
:param dcm: the pydicom dataset to be copied
:return:
"""
newdcm = pydi.Dataset()
for k, v in dcm.items():
newdcm[k] = v
newdcm.file_meta = mk_file_meta()
newdcm.is_little_endian = True
newdcm.is_implicit_VR = False
newdcm.SOPInstanceUID = newdcm.file_meta.MediaStorageSOPInstanceUID
newdcm.SOPClassUID = newdcm.file_meta.MediaStorageSOPClassUID
return newdcm | e208ee11241b4194bb0d02357e625d0ee4f52d2e | 12,296 |
import toml
import itertools
from pathlib import Path
def load_plate(toml_path):
"""\
Parse a TOML-formatted configuration file defining how each well in a
particular plate should be interpreted.
Below is a list of the keys that are understood in the configuration file:
    'path' [string]
        The path to the XLSX file containing the plate reader data, relative to
        the configuration file itself. Multiple files can be given via the
        'paths' key instead. If neither is given, this script will look for a
        file with the same name as the configuration file, but the '.xlsx'
        extension, e.g. 'abc.xlsx' if the config file is 'abc.toml'.
'template' [string]
The path to another TOML file that should be interpreted as containing
default values for all possible settings.
'notes' [string]
A string that will be printed every time the file is visualized. This
is meant to reminder the user of any details relating to this
particular experiment (e.g. mistakes) that might affect interpretation
of the data.
The following keys relate to particular wells. Each of these keys can be
specified in any of four kinds of block: [well.A1], [row.A], [col.1], and
[plate]. The [well] block allows values to be set for individual wells ('A1'
in this example). The [row] and [col] blocks allow values to be set for
whole rows and columns ('A' and '1' in these examples). The [plate] block
allows values to be set for the whole plate. The same value can be set
multiple times, in which case the value from the most specific block will
take precedence.
"""
def recursive_merge(layout, defaults, overwrite=False):
for key, default in defaults.items():
if isinstance(default, dict):
layout.setdefault(key, {})
recursive_merge(layout[key], default)
else:
if overwrite or key not in layout:
layout[key] = default
def do_load_paths(toml_path, expected_ext='.xlsx'):
toml_path = Path(toml_path).resolve()
layout = toml.load(str(toml_path))
# Resolve the path(s) to actual data.
if 'path' in layout and 'paths' in layout:
raise ValueError(f"{toml_path} specifies both 'path' and 'paths'")
elif 'path' in layout:
path = toml_path.parent / layout['path']
layout['paths'] = {'default': path}
elif 'paths' in layout:
layout['paths'] = {
toml_path.parent / x
for x in layout['paths']
}
else:
default_path = toml_path.with_suffix(expected_ext)
if default_path.exists():
layout['paths'] = {'default': default_path}
# Include a remote file if one is specified.
if 'template' in layout:
layout['template'] = toml_path.parent / layout['template']
template = do_load_paths(layout['template'])
recursive_merge(layout, template)
return layout
layout = do_load_paths(toml_path)
# Apply any row or column defaults.
if 'well' not in layout:
layout['well'] = {}
rows = layout.get('row', {})
cols = layout.get('col', {})
# Create new wells implied by the 'row' and 'col' blocks.
for row, col in itertools.product(rows, cols):
layout['well'].setdefault(f'{row}{col}', {})
# Update any existing wells.
for well in layout.get('well', {}):
row, col = well[:1], well[1:]
recursive_merge(layout['well'][well], rows.get(row, {}))
recursive_merge(layout['well'][well], cols.get(col, {}))
# Apply any plate-wide defaults.
layout.setdefault('plate', {}),
for well in layout.get('well', {}):
recursive_merge(layout['well'][well], layout['plate'])
# If the experiment has any notes, print them out.
if 'notes' in layout:
print(toml_path)
print(layout['notes'].strip())
print()
return layout | cc92a9dae783de915628984979119ca9d2b591a2 | 12,297 |
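# A minimal layout illustrating the precedence rules described above (values set in
# [well.*] win over [row.*] and [col.*], which in turn win over [plate]); the file
# names are made up, and the data file is resolved separately via 'path'/'paths' or a
# sibling .xlsx file.
example_toml = """
[plate]
media = 'LB'

[row.A]
strain = 'wt'

[col.1]
dilution = 10

[well.A1]
dilution = 100          # overrides the column default
"""
# Path('abc.toml').write_text(example_toml)
# layout = load_plate('abc.toml')
# layout['well']['A1'] == {'dilution': 100, 'strain': 'wt', 'media': 'LB'}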
import numpy as np
def reduce_entropy(X, axis=-1):
"""
calculate the entropy over axis and reduce that axis
:param X:
:param axis:
:return:
"""
return -1 * np.sum(X * np.log(X+1E-12), axis=axis) | 68a7d86bf0ad204d989fddceee9e4f75c77a4cb5 | 12,298 |
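# Quick check: each row of a uniform distribution over four outcomes has entropy
# ln(4) ~ 1.386.
p = np.full((2, 4), 0.25)      # two rows, each a uniform distribution
print(reduce_entropy(p))       # ~[1.386 1.386]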
def compile_pbt(lr: float = 5e-3, value_weight: float = 0.5):
"""
my default: 5e-3
# SAI: 1e-4
# KataGo: per-sample learning rate of 6e-5, except 2e-5 for the first 5mm samples
"""
input_shape = (N, N, dual_net.get_features_planes())
model = dual_net.build_model(input_shape)
opt = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=opt,
loss={
'policy': 'categorical_crossentropy',
'value': custom_BCE_loss},
loss_weights={
'policy': 0.50,
'value': value_weight},
metrics={
'policy': keras.metrics.CategoricalAccuracy(name="move_acc"),
})
return model | e625915c55a44a8c128431ae220401c451ee69a5 | 12,299 |