content | sha1 | id
---|---|---
stringlengths 35 to 762k | stringlengths 40 | int64 0 to 3.66M
import os
import boto3
def connect_to_service(service_name, client=True, env=None, region_name=None, endpoint_url=None):
"""
Generic method to obtain an AWS service client using boto3, based on environment, region, or custom endpoint_url.
"""
env = get_environment(env, region_name=region_name)
my_session = None
if CUSTOM_BOTO3_SESSION:
my_session = CUSTOM_BOTO3_SESSION
elif CREATE_NEW_SESSION_PER_BOTO3_CONNECTION:
my_session = boto3.session.Session()
else:
my_session = boto3
method = my_session.client if client else my_session.resource
if not endpoint_url:
if env.region == REGION_LOCAL:
endpoint_url = os.environ['TEST_%s_URL' % (service_name.upper())]
return method(service_name, region_name=env.region, endpoint_url=endpoint_url)
|
364bfd5d1036627187ff8ead8429b776f3656431
| 19,800 |
def hdf_diff(*args, **kwargs):
""":deprecated: use `diff_blocks` (will be removed in 1.1.1)"""
return diff_blocks(*args, **kwargs)
|
3c05a908cc32c2ba4e481ff0de41f78249e2ef02
| 19,801 |
import glob
import numpy as np
import open3d as o3d
def determine_epsilon():
"""
    We follow Learning Compact Geometric Features to compute this hyperparameter, which we unfortunately did not end up using.
"""
base_dir = '../dataset/3DMatch/test/*/03_Transformed/*.ply'
files = sorted(glob.glob(base_dir), key=natural_key)
etas = []
for eachfile in files:
pcd = o3d.io.read_point_cloud(eachfile)
pcd = pcd.voxel_down_sample(0.025)
pcd_tree = o3d.geometry.KDTreeFlann(pcd)
distances = []
for i, point in enumerate(pcd.points):
[count, vec1, vec2] = pcd_tree.search_knn_vector_3d(point, 2)
distances.append(np.sqrt(vec2[1]))
etai = np.median(distances)
etas.append(etai)
return np.median(etas)
|
a6af243ebeb37e046e9f23c86080822bff4f490d
| 19,802 |
def sort_ipv4_addresses_with_mask(ip_address_iterable):
"""
Sort IPv4 addresses in CIDR notation
    :param iter ip_address_iterable: An iterable container of IPv4 CIDR notated addresses
    :return list: A sorted list of IPv4 CIDR notated addresses
"""
return sorted(
ip_address_iterable,
key=lambda addr: (
int(addr.split('.')[0]),
int(addr.split('.')[1]),
int(addr.split('.')[2]),
int(addr.split('.')[3].split('/')[0]),
int(addr.split('.')[3].split('/')[1])
)
)
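# Illustrative usage (not part of the original function): CIDR addresses are
# ordered octet by octet and then by prefix length, so '10.0.0.0/8' sorts
# before '10.0.0.0/16'.
addrs = ['192.168.1.0/24', '10.0.0.0/16', '10.0.0.0/8', '172.16.0.0/12']
assert sort_ipv4_addresses_with_mask(addrs) == [
    '10.0.0.0/8', '10.0.0.0/16', '172.16.0.0/12', '192.168.1.0/24']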
|
97517b2518b81cb8ce4cfca19c5512dae6bae686
| 19,803 |
from copy import deepcopy
def _subattribute_from_json(data: JsonDict) -> SubAttribute:
"""Make a SubAttribute from JSON data (deserialize)
Args:
data: JSON data received from Tamr server.
"""
cp = deepcopy(data)
d = {}
d["name"] = cp["name"]
d["is_nullable"] = cp["isNullable"]
d["type"] = from_json(cp["type"])
return SubAttribute(**d)
|
4fde9e1eb456fd42b8ad8e49ad893a62ba01eba4
| 19,804 |
def compare_asts(ast1, ast2):
"""Compare two ast trees. Return True if they are equal."""
    import leo.core.leoGlobals as g
    # Compare the two parse trees.
try:
_compare_asts(ast1, ast2)
except AstNotEqual:
dump_ast(ast1, tag='AST BEFORE')
dump_ast(ast2, tag='AST AFTER')
if g.unitTesting:
raise
return False
except Exception:
g.warning(f"Unexpected exception")
g.es_exception()
return False
return True
|
32ab70f1fa31f9ae6cab4e9f5ba91ff71a9e79f8
| 19,805 |
import json
from flask import abort, jsonify, make_response, request
def shit():
"""Ready to go deep into the shit?
Parse --data from -X POST -H 'Content-Type: application/json'
and send it to the space background
"""
try:
body = json.loads(request.data)
except Exception as e:
abort(400, e)
if not body:
abort(400, "Missing data")
if "title" not in body:
abort(400, "Missing `title` param")
if "artist" not in body:
abort(400, "Missing `artist` param")
if "client_id" not in body:
"""client_id is used to send back
        the lyrics through the Notifier aka Flash.
"""
abort(400, "Missing `client_id` param")
# send data to our Background Worker aka Iron Rogue
rogue(body["title"], body["artist"], body["client_id"])
return make_response(jsonify({
"code": 202,
"message": "request accepted and send into the shit"
}), 202)
|
bdc435566aeaafac8144775188478cb28724802b
| 19,806 |
def clear_settings(site_name): # untested - do I need/want this?
"""update settings to empty dict instead of initialized)
"""
return update_settings(site_name, {})
|
a6cbd9bc43ce5bc7159bc75d4cab8c703d73e8cd
| 19,807 |
def reorder_jmultis_det_terms(jmulti_output, constant, seasons):
"""
In case of seasonal terms and a trend term we have to reorder them to make
the outputs from JMulTi and sm2 comparable.
JMulTi's ordering is: [constant], [seasonal terms], [trend term] while
in sm2 it is: [constant], [trend term], [seasonal terms]
Parameters
----------
jmulti_output : ndarray (neqs x number_of_deterministic_terms)
constant : bool
Indicates whether there is a constant term or not in jmulti_output.
seasons : int
Number of seasons in the model. That means there are seasons-1
columns for seasonal terms in jmulti_output
Returns
-------
reordered : ndarray (neqs x number_of_deterministic_terms)
jmulti_output reordered such that the order of deterministic terms
matches that of sm2.
"""
if seasons == 0:
return jmulti_output
constant = int(constant)
const_column = jmulti_output[:, :constant]
season_columns = jmulti_output[:, constant:constant + seasons - 1].copy()
trend_columns = jmulti_output[:, constant + seasons - 1:].copy()
return np.hstack((const_column,
trend_columns,
season_columns))
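# A minimal check of the reordering (illustrative, not from the original
# module; numpy is imported here since the snippet assumes it as np).
# JMulTi column order: [constant, season1, season2, season3, trend].
import numpy as np
jm = np.array([[1.0, 10.0, 20.0, 30.0, 99.0]])
reordered = reorder_jmultis_det_terms(jm, constant=True, seasons=4)
# sm2 column order: [constant, trend, season1, season2, season3].
assert np.allclose(reordered, [[1.0, 99.0, 10.0, 20.0, 30.0]])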
|
91fd48e14addf264f00a6e898af8d934bcd84cca
| 19,808 |
def test_require_gdal_version_param_values():
"""Parameter values are allowed for all versions >= 1.0"""
for values in [('bar',), ['bar'], {'bar'}]:
@require_gdal_version('1.0', param='foo', values=values)
def a(foo=None):
return foo
assert a() is None
assert a('bar') == 'bar'
assert a(foo='bar') == 'bar'
|
de11d8f6f0720c1b3ef6aa957f8386e8436b1e73
| 19,809 |
def nav_get_element(nav_expr, side, dts, xule_context):
"""Get the element or set of elements on the from or to side of a navigation expression'
This determines the from/to elements of a navigation expression. If the navigation expression includes the from/to component, this will be evaluated.
The result can be a qname, concept or a set/list of qname or concepts.
Arguments:
nav_expr (dictionary): The navigation expression AST node
side (string): Either 'from' or 'to'.
xule_context (XuleRuleContext): The processing context
Returns:
None - indicates that the side is not in the navigation expression
set of concepts - the set of the concepts if the side evaluates to a set or list of concept/concepts
"""
if side in nav_expr:
side_value = evaluate(nav_expr[side], xule_context)
if side_value.type == 'qname':
concept = XuleProperties.get_concept(dts, side_value.value)
if concept is None:
return set()
else:
return {concept, }
elif side_value.type == 'concept':
return {side_value.value, }
elif side_value.type in ('set', 'list'):
concepts = set()
for item in side_value.value:
if item.type == 'qname':
concept = XuleProperties.get_concept(dts, item.value)
if concept is not None:
concepts.add(concept)
elif item.type == 'concept':
concepts.add(item.value)
else:
raise XuleProcessingError(_(
"In navigation, expecting a collection of concepts or concepts, but found {}.".format(
item.type)))
return concepts
else:
raise XuleProcessingError(
_("In navigation, expecting a concept or qname, but found {}.".format(side_value.type)))
else:
return None
|
db29b0c2e7832c2b386dd602c77d18a37c2c1307
| 19,810 |
def do_pivot(df: pd.DataFrame, row_name: str, col_name: str, metric_name: str):
"""
Works with df.pivot, except preserves the ordering of the rows and columns
in the pivoted dataframe
"""
original_row_indices = df[row_name].unique()
original_col_indices = df[col_name].unique()
pivoted = df.pivot(index=row_name, columns=col_name, values=metric_name)
pivoted = pivoted[original_col_indices]
pivoted = pivoted.reindex(original_row_indices).reset_index()
pivoted.columns.name = None
return pivoted
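# Illustrative usage (hypothetical column names, not from the original module):
# the long-format frame lists model 'b' before 'a' and metric 'recall' before
# 'precision', and do_pivot keeps both orderings, unlike plain df.pivot.
import pandas as pd
long_df = pd.DataFrame({'model': ['b', 'b', 'a', 'a'],
                        'metric': ['recall', 'precision', 'recall', 'precision'],
                        'score': [0.7, 0.8, 0.9, 0.6]})
wide = do_pivot(long_df, row_name='model', col_name='metric', metric_name='score')
assert list(wide['model']) == ['b', 'a']
assert list(wide.columns) == ['model', 'recall', 'precision']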
|
70df87eb7d1ca19116ec04854bee635a66f02908
| 19,811 |
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
""" Batch normalization on FC data.
Args:
inputs: Tensor, 2D BxC input
      is_training: boolean tf.Variable, true indicates training phase
      bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)
|
e6a13c50b021785ddcb278c449ba6f9be9271106
| 19,812 |
def stat(file_name):
"""
Read information from a FreeSurfer stats file.
Read information from a FreeSurfer stats file, e.g., `subject/stats/lh.aparc.stats` or `aseg.stats`. A stats file is a text file that contains a data table and various meta data.
Parameters
----------
file_name: string
The path to the stats file.
Returns
-------
dictionary of strings (includes nested sub dicts)
        The result dictionary, containing the following 5 keys:
- 'ignored_lines': list of strings. The list of lines that were not parsed in a special way. This is raw data.
- 'measures': string list of dimension (n, m) if there are n measures with m properties each stored in the stats file.
- 'table_data': string list of dimension (i, j) when there are i lines containing j values each in the table stored in the stats file. You may want to convert the columns to the proper data types and put the result into several numpy arrays or a single Pandas data frame.
- 'table_column_headers': string list. The names for the columns for the table_data. This information is parsed from the table_meta_data and given here for convenience.
- 'table_meta_data': dictionary. The full table_meta_data. Stores properties in key, value sub dictionaries. For simple table properties, the dictionaries are keys of the returned dictionary. The only exception is the information on the table columns (header data). This information can be found under the key `column_info_`, which contains one dictionary for each column. In these dictionaries, data is stored as explained for simple table properties.
Examples
--------
Read the `aseg.stats` file for a subject:
>>> import brainload as bl
>>> stats = bl.stats('/path/to/study/subject1/stats/aseg.stats')
Collect some data, just to show the data structures.
>>> print(len(stats['measures'])) # Will print the number of measures.
>>> print("|".join(stats['measures'][0])) # Print all data on the first measure.
Now lets print the table_data:
>>> num_data_rows = len(stats['table_data'])
>>> num_entries_per_row = len(stats['table_data'][0])
And get some information on the table columns (the table header):
    >>> print(stats['table_meta_data']['NTableCols']) # will print "10" (from a simple table property stored directly in the dictionary).
    Get the names of all the data columns:
    >>> print(",".join(stats['table_column_headers']))
Get the name of the first column:
>>> first_column_name = stats['table_column_headers'][0]
More detailed information on the individual columns can be found under the special `column_info_` key if needed:
>>> column2_info_dict = stats['table_meta_data']['column_info_']['2']
>>> print(column2_info_dict['some_key']) # will print the value
    Note that all data is returned as string type, you will need to convert it to float (or whatever) yourself.
"""
lines = nit._read_text_file_lines(file_name)
return _parse_stats_lines(lines)
|
bf64bf488d34a32eebcb66472a3fa567bf5a368d
| 19,813 |
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
XXX Temporary until marshal's long function are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x
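# Quick sanity check of the byte order (illustrative, not part of the module):
# the little-endian bytes 01 02 00 00 decode to 0x0201 == 513.
assert _r_long(b'\x01\x02\x00\x00') == 513
assert _r_long(bytes([0, 0, 0, 1])) == 1 << 24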
|
7b40bf05c9d47c7921b1377f0d2235c483e6ba2e
| 19,814 |
from typing import Union
from typing import Dict
from typing import Any
from typing import Iterable
from typing import Type
from typing import Optional
import dataclasses
from typing import TypeVar
from typing import cast
def parse_model(data: Union[Dict[str, Any], Iterable[Any], Any],
cls: Union[Type[TModel], Type[Any]],
rename_keys: Optional[Dict[str, str]] = None) \
-> Union[TModel, Any]:
"""Instantiates an object of the provided class cls for a provided mapping.
Instantiates an object of a class specifying a model for the provided
mapping. An entry in the mapping must be provided for all non-optional
attribute of the class. Keys are expected to be in CapsWords, matching
the snake_case corresponding class attributes. Any additional entries
found in the mapping that do not correspond to class attributes are
ignored.
Args:
data: Dictionary containing pairs with the names of the attributes
and their respective values provided to instantiate the model
class.
cls: The model class to instantiate.
rename_keys: Key names to rename to match model attribute names,
used when an automated translation of the name from CapsWords
            to snake_case is not sufficient. Renaming must provide the name
in CapsWords.
Returns:
The instantiated model class object.
Raises:
TypeError: Cannot parse the value of a class attribute to the
appropriate type.
NotImplementedError: The type of a class attribute is not supported.
"""
if cls is not NoneType and dataclasses.is_dataclass(cls) \
and isinstance(data, dict):
if rename_keys:
for k, r, in rename_keys.items():
if k in data:
data[r] = data.pop(k)
field_names = set(f.name for f in dataclasses.fields(cls))
field_types = {f.name: f.type for f in dataclasses.fields(cls)}
parsed_data: Dict[str, Any] = {}
for key, value in data.items():
key = _to_snake_case(key)
if key in field_names:
field_type = field_types[key]
parsed_data[key] = parse_model(value, field_type)
args = []
for f in dataclasses.fields(cls):
if f.name in parsed_data:
a = parsed_data[f.name]
elif f.default is not dataclasses.MISSING:
a = f.default
else:
fc = getattr(f, 'default_factory')
if fc is not dataclasses.MISSING:
a = fc()
else:
raise TypeError(f'Cannot initialize class {cls}. '
f'Missing required parameter {f.name}')
args.append(a)
return cls(*args)
field_type_origin = getattr(cls, '__origin__', None)
if field_type_origin is Union:
for candidate_cls in getattr(cls, '__args__', []):
try:
return parse_model(data, candidate_cls)
except (TypeError, ValueError):
pass
raise ValueError(f'Cannot parse value {data} as {cls}')
if field_type_origin is list and isinstance(data, Iterable):
list_field_type = getattr(cls, '__args__', [])[0]
if type(list_field_type) is TypeVar:
return list(data)
return [parse_model(v, list_field_type) for v in data]
if field_type_origin is tuple and isinstance(data, Iterable):
tuple_field_types = getattr(cls, '__args__', [])
if not tuple_field_types:
return tuple(data)
return tuple(parse_model(v, tuple_field_types[0]) for v in data)
parsable_classes = tuple(getattr(ParsableClass, '__args__', []))
if cls in parsable_classes:
return _parse_class(data, cast(Type[ParsableClass], cls))
raise NotImplementedError(f'Cannot parse data {data} as {cls}.')
|
85ba92ac4c3e9df8e96612017b94db73ea53d19e
| 19,815 |
def findTopEyelid(imsz, imageiris, irl, icl, rowp, rp, ret_top=None):
"""
Description:
Mask for the top eyelid region.
Input:
imsz - Size of the eye image.
imageiris - Image of the iris region.
irl -
icl -
rowp - y-coordinate of the inner circle centre.
        rp - radius of the inner circle.
ret_top - Just used for returning result when using multiprocess.
Output:
mask - Map of noise that will be masked with NaN values.
"""
topeyelid = imageiris[0: rowp - irl - rp, :]
lines = findline(topeyelid)
mask = np.zeros(imsz, dtype=float)
if lines.size > 0:
xl, yl = linecoords(lines, topeyelid.shape)
yl = np.round(yl + irl - 1).astype(int)
xl = np.round(xl + icl - 1).astype(int)
yla = np.max(yl)
y2 = np.arange(yla)
mask[yl, xl] = np.nan
grid = np.meshgrid(y2, xl)
mask[grid] = np.nan
# Return
if ret_top is not None:
ret_top[0] = mask
return mask
|
9c01e0966a1800ed76f5370cbc382a801af08d67
| 19,816 |
def script_with_queue_path(tmpdir):
"""
Pytest fixture to return a path to a script with main() which takes
a queue and procedure as arguments and adds procedure process ID to queue.
"""
path = tmpdir.join("script_with_queue.py")
path.write(
"""
def main(queue, procedure):
queue.put(procedure.pid)
"""
)
return f"file://{str(path)}"
|
7c2c2b4c308f91d951496c53c9bdda214f64c776
| 19,817 |
import configparser
def readini(inifile):
""" This function will read in data from a configureation file.
Inputs
inifile- The name of the configuration file.
Outputs
params - A dictionary with keys from INIOPTIONS that holds all of
the plotting parameters.
"""
if inifile is None:
return
    config = configparser.ConfigParser()
config.read(inifile)
params={i:None for i in INIOPTIONS}
# Read in data from ini file
for ip in config.options('params'):
# get the original param name
rname = config.get('paramsnames',ip)
# get the parameter and split it up
params[rname] = config.get('params',ip)
params[rname]=params[rname].split(" ")
        # If it's a single item, take it and try to convert it to a float
if len(params[rname])==1:
params[rname]=params[rname][0]
try:
params[rname]=float(params[rname])
except Exception:
pass
else:
for a in range(len(params[rname])):
try:
params[rname][a]=float(params[rname][a])
except Exception:
pass
# turn the time bounds to time stamps
    if params['timebounds'] is not None:
timelist = params['timebounds']
params['timebounds']=str2posix(timelist)
# which times will have names
if params['TextList'] is None:
params['TextList']=[]
# change param height to a list of lists
if not params['paramheight'] is None:
l1 = params['paramheight'][::2]
l2 = params['paramheight'][1::2]
params['paramheight']=[[i,j] for i,j in zip(l1,l2)]
if not params['paramlim'] is None:
l1 = params['paramlim'][::2]
l2 = params['paramlim'][1::2]
params['paramlim']=[[i,j] for i,j in zip(l1,l2)]
# Default for reinterp is false
    if params['reinterp'] is None:
params['reinterp']=False
else:
params['reinterp'] = params['reinterp'].lower()=='yes'
return params
|
eb5800f00cc8e58557e11fb9ff525e7f407c9eab
| 19,818 |
def get_param_store():
"""
Returns the ParamStore
"""
return _PYRO_PARAM_STORE
|
d71ab10f2029fab735268956590094d8c94dd150
| 19,819 |
def secret_add(secret):
"""
    Return a lambda that adds its argument to the number passed into secret_add.
:param secret: secret number to add (integer)
:return: lambda that takes a number and adds it to the secret
"""
return lambda addend: secret + addend
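# Usage sketch (not part of the original module): the secret is captured in
# the returned closure.
add_three = secret_add(3)
assert add_three(4) == 7
assert add_three(10) == 13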
|
151f1cff9f0e0bbb43650d63592ba0c2cb05611e
| 19,820 |
def morseToBoolArr(code, sps, wpm, fs=None):
""" morse code to boolean array
Args:
code (str): morse code
sps: Samples per second
wpm: Words per minute
fs: Farnsworth speed
Returns:
boolean numpy array
"""
dps = wpmToDps(wpm) # dots per second
baseSampleCount = sps / dps
samplesPerDot = int(round(baseSampleCount))
samplesPerDash = int(round(baseSampleCount * DASH_WIDTH))
samplesBetweenElements = int(round(baseSampleCount))
farnsworthScale = farnsworthScaleFactor(wpm, fs)
samplesBetweenLetters = int(round(baseSampleCount * CHAR_SPACE * farnsworthScale))
samplesBetweenWords = int(round(baseSampleCount * WORD_SPACE * farnsworthScale))
    dotArr = np.ones(samplesPerDot, dtype=bool)
    dashArr = np.ones(samplesPerDash, dtype=bool)
    eGapArr = np.zeros(samplesBetweenElements, dtype=bool)
    cGapArr = np.zeros(samplesBetweenLetters, dtype=bool)
    wGapArr = np.zeros(samplesBetweenWords, dtype=bool)
pieces = []
prevWasSpace = False
prevWasElement = False
for c in code:
if (c == DOT or c == DASH) and prevWasElement:
pieces.append(eGapArr)
if c == DOT:
pieces.append(dotArr)
prevWasSpace, prevWasElement = False, True
elif c == DASH:
pieces.append(dashArr)
prevWasSpace, prevWasElement = False, True
else: # Assume the char is a space otherwise
if prevWasSpace:
pieces[-1] = wGapArr
else:
pieces.append(cGapArr)
prevWasSpace, prevWasElement = True, False
return np.concatenate(pieces)
|
90b4225a2a9979ac7f813a1f964b64ef9310ee23
| 19,821 |
import csv
from numbers import Number
from numpy import array
from scipy.interpolate import interp1d
def LaserOptikMirrorTransmission(interpolated_wavelengths,refractive_index = "100", shift_spectrum=7,rescale_factor=0.622222):
"""
Can be used for any wavelengths in the range 400 to 800 (UNITS: nm)
Uses supplied calculation from LaserOptik
Interpolate over selected wavelengths: returns a function which takes wavelength (nm) as argument
Shifts transmission spectrum with calibration still to come, likewise for "rescale_factor"
"refractive_index" argument is only for backwards compatibility
"""
reflectivity_folder = data_root_folder + folder_separator+ "calibration_data" + folder_separator
#reflectivity_folder = "./"
reflectivity_filename = "LaserOptik20160129_Theorie_T.DAT"
fname = reflectivity_folder+reflectivity_filename
res = csv.reader(open(fname), delimiter='\t')
refl_text = [x for x in res][1:] #removes column headings
original_wavelengths = array([float(l[0]) for l in refl_text])
original_transmissions = array([float(l[1]) for l in refl_text])
original_reflectivities = 1-original_transmissions
#
wavelength_shift = 0
if shift_spectrum == "planar": #shift to be measured
wavelength_shift = 0
elif shift_spectrum == "spherical":
wavelength_shift = 0 # shift to be measured
elif isinstance(shift_spectrum,Number):
wavelength_shift = shift_spectrum
#
interpolated_transmission_func = interp1d(original_wavelengths,original_transmissions)
interpolated_transmissions = interpolated_transmission_func(interpolated_wavelengths + wavelength_shift)
#Transmission to be calibrated at at least one narrow wavelength
#Assume transmission scales with this factor at all wavelengths [not well justified assumption]
interpolated_transmissions = interpolated_transmissions / rescale_factor
return interpolated_transmissions
|
a3eafd7a788ddcfdedd8e25081f6e7cc1fd03cc5
| 19,822 |
import math
def menu(prompt, titles, cols=1, col_by_col=True, exc_on_cancel=None,
caption=None, default=None):
"""Show a simple menu.
If the input is not allowed the prompt will be shown again. The
input can be cancelled with EOF (``^D``).
The caller has to take care that the menu will fit in the terminal.
::
def update():
...
def sort(desc=True, duration=True):
...
items = (
('Update', update),
('Sort duration desc', sort),
('Sort duration asc', sort, False),
('Sort size desc', sort, True, False),
('Sort size asc', sort, False, False),
)
i = menu('> ', tuple(x[0] for x in items))
print()
if i is not None:
items[i][1](*items[i][2:])
.. raw:: html
<pre style="color:#FFFFFF;background-color:#000000">[1] Update
[2] Sort duration desc
[3] Sort duration asc
[4] Sort size desc
[5] Sort size asc
> </pre>
:param str prompt: the prompt
:param tuple titles: the titles of the menu options
:param int cols: number of columns
:param bool col_by_col: if ``True`` the menu will be filled
column-by-column, otherwise row-by-row
:param bool exc_on_cancel: if ``True`` an EOF will cause an Exception;
if ``None`` the value of ``exception_on_cancel``
will be used
:param str caption: caption for the menu
:param int default: number of the default menu option
:return: index of the selected option in ``titles`` or None if cancelled
and ``exc_on_cancel=False``
:rtype: int or None
:raises EOFError: if input was cancelled and ``exc_on_cancel=True``
:raises TypeError: if ``titles`` is not a tuple or ``default`` is not
an integer
.. versionadded:: 0.4.0
.. versionchanged:: 0.6.0
Add parameter ``caption``
.. versionchanged:: 0.17.0
Add parameter ``default``
"""
if default is not None:
check_type(default, int, 'default')
if not (0 < default <= (len(titles))):
raise ValueError(
f'default must be > 0 and <= {len(titles)}, got {default}')
check_type(titles, tuple, 'titles')
rows = math.ceil(len(titles) / cols)
num_width = len(str(len(titles)))
title_width = max(map(len, titles))
if col_by_col:
indices = (x + rows * y for x in range(rows) for y in range(cols))
else:
indices = range(len(titles))
lines = []
row = []
for cnt, idx in enumerate(indices, 1):
if idx < len(titles):
row.append(f'[{idx + 1:{num_width}}] {titles[idx]:{title_width}}')
if cnt % cols == 0:
lines.append(' '.join(row))
lines.append('\n')
row.clear()
if row:
lines.append(' '.join(row))
lines.append('\n')
if caption:
width = max(len(caption), max(map(len, lines)))
text = caption.center(width) + '\n' + '-' * width + '\n'
else:
text = ''
text += ''.join(lines) + prompt
def f(s):
i = int(s)
if 0 < i <= len(titles):
return i - 1
raise ValueError
return read(text, check=f, exc_on_cancel=exc_on_cancel,
default=str(default))
|
d4d24cddf40f314c31415685c4eecdc51da2aca2
| 19,823 |
from typing import Dict
from typing import Any
def dict_to_annotation(annotation_dict: Dict[str, Any], ignore_extra_keys = True) -> Annotation:
"""Calls specific Category object constructor based on the structure of the `annotation_dict`.
Args:
annotation_dict (Dict[str, Any]): One of COCO Annotation dictionaries.
        ignore_extra_keys (bool, optional): Ignore extra fields in the dictionary beyond those specified for the dataclass. Defaults to True.
Raises:
ValueError: If `annotation_dict` has unspecified structure.
Returns:
        Annotation: Annotation dataclass generated from the `annotation_dict`.
"""
if set(DICT_TO_ANNOTATION_MAP['object_detection']).issubset(annotation_dict.keys()):
return ObjectDetectionAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['keypoint_detection']).issubset(annotation_dict.keys()):
return KeypointDetectionAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['panoptic_segmentation']).issubset(annotation_dict.keys()):
return PanopticSegmentationAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['image_captioning']).issubset(annotation_dict.keys()):
return ImageCaptioningAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['dense_pose']).issubset(annotation_dict.keys()):
return DensePoseAnnotation.from_dict(annotation_dict, ignore_extra_keys)
raise ValueError(
"Unexpected annotation structure. Consider manually creating COCO dataset."
"\nAnd extending one of existing objects or create new following one of the Protocols structure."
)
|
846c353ab4d15a0558c5cabced30e14a57b5ed64
| 19,824 |
def getAggregation(values):
"""
Produces a dictionary mapping raw states to aggregated states in the form
{raw_state:aggregated_state}
"""
unique_values = list(set(values))
aggregation = {i:unique_values.index(v) for i, v in enumerate(values)}
aggregation['n'] = len(unique_values)
return aggregation
|
606bee7c7055b8f2a95bc061dc1883713198b506
| 19,825 |
import boto3
from botocore import UNSIGNED
from botocore.client import Config
def create_anonymous_client():
"""Creates an anonymous s3 client. This is useful if you need to read an object created by an anonymous user, which
the normal client won't have access to.
"""
return boto3.client('s3', config=Config(signature_version=UNSIGNED))
|
1c321d2c42b41b19a4fad66e67199f63c2b04338
| 19,826 |
def predictionTopK(pdt, k):
"""预测值中topk
@param pdt 预测结果,nupmy数组格式
@param k 前k个结果
@return topk结果,numpy数组格式
"""
m, n = np.shape(pdt)
ret = []
for i in range(m):
curNums = pdt[i]
tmp = topK(curNums.tolist()[0], k)
ret.append(tmp)
return np.mat(ret)
|
57740bb3e2f4521d14273194dc024e8a91347241
| 19,827 |
import argparse
import os
from datetime import datetime
import requests
import json
def extract_url(args: argparse.Namespace) -> dict:
"""Extracts data from products.json endpoint from specified args.
Args:
args (argparse.Namespace): Parsed args.
Returns:
dict: Data logged from extraction, including if successful
or errors present.
"""
p = format_url(args.url, scheme='https', return_type='parse_result')
formatted_url = p.geturl()
json_key = 'products'
if args.collections:
json_key = 'collections'
fp = os.path.join(
args.dest_path, f'{p.netloc}.{json_key}.json')
if args.file_path:
fp = os.path.join(
args.dest_path, f'{args.file_path}.json')
endpoint = f'{formatted_url}/{json_key}.json'
ret = {
'url': endpoint,
'collected_at': str(datetime.now()),
'success': False,
'error': '',
'file_path': '',
}
try:
data = extract(endpoint, json_key, args.page_range)
except requests.exceptions.HTTPError as err:
ret['error'] = str(err)
except json.decoder.JSONDecodeError as err:
ret['error'] = str(err)
except Exception as err:
ret['error'] = str(err)
else:
ret['success'] = True
ret[json_key] = data
if ret['success']:
ret['file_path'] = fp
json_to_file(fp, data)
return ret
|
42967e5bd89b688585d64982f419a19e341232a5
| 19,828 |
def _devive_from_rsrc_id(app_unique_name):
"""Format devices names.
:returns:
``tuple`` - Pair for device names based on the app_unique_name.
"""
# FIXME(boysson): This kind of manipulation should live elsewhere.
_, uniqueid = app_unique_name.rsplit('-', 1)
veth0 = '{id:>013s}.0'.format(id=uniqueid)
veth1 = '{id:>013s}.1'.format(id=uniqueid)
return (veth0, veth1)
|
452b1c23f56ae048571f4d8a8567585230ed6c91
| 19,829 |
import io
import os
def fit_noise_1d(npower,lmin=300,lmax=10000,wnoise_annulus=500,bin_annulus=20,lknee_guess=3000,alpha_guess=-4,
lknee_min=0,lknee_max=9000,alpha_min=-5,alpha_max=1,allow_low_wnoise=False):
"""Obtain a white noise + lknee + alpha fit to a 2D noise power spectrum
The white noise part is inferred from the mean of lmax-wnoise_annulus < ells < lmax
npower is 2d noise power
"""
fbin_edges = np.arange(lmin,lmax,bin_annulus)
modlmap = npower.modlmap()
fbinner = stats.bin2D(modlmap,fbin_edges)
cents,dn1d = fbinner.bin(npower)
w2 = dn1d[np.logical_and(cents>=(lmax-wnoise_annulus),cents<lmax)].mean()
try:
# print(w2)
assert w2>0
# pl = io.Plotter('Dell')
# pl.add(cents,dn1d)
# pl.add(cents,cents*0+w2)
# pl.done(os.environ['WORK']+"/nonpos_white_works.png")
    except AssertionError:
print("White noise level not positive")
print(w2)
if not(allow_low_wnoise):
pl = io.Plotter('Dell')
pl.add(cents,dn1d)
pl.done(os.environ['WORK']+"/nonpos_white.png")
raise
else:
w2 = np.abs(w2)
print("Setting to ",w2)
wnoise = np.sqrt(w2)*180.*60./np.pi
ntemplatefunc = lambda x,lknee,alpha: fbinner.bin(rednoise(modlmap,wnoise,lknee=lknee,alpha=alpha))[1]
#ntemplatefunc = lambda x,lknee,alpha: rednoise(x,wnoise,lknee=lknee,alpha=alpha) # FIXME: This switch needs testing !!!!
res,_ = curve_fit(ntemplatefunc,cents,dn1d,p0=[lknee_guess,alpha_guess],bounds=([lknee_min,alpha_min],[lknee_max,alpha_max]))
lknee_fit,alpha_fit = res
# print(lknee_fit,alpha_fit,wnoise)
# pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='D',scalefn=lambda x: x**2./2./np.pi)
# pl.add(cents,dn1d)
# pl.add(cents,cents*0+w2)
# pl.add(cents,rednoise(cents,wnoise,lknee=lknee_fit,alpha=alpha_fit),ls="--")
# pl.add(cents,rednoise(cents,wnoise,lknee=lknee_guess,alpha=alpha_guess),ls="-.")
# pl._ax.set_ylim(1e-1,1e4)
# pl.done(os.environ['WORK']+"/fitnoise_pre.png")
# sys.exit()
return wnoise,lknee_fit,alpha_fit
|
c162f2f77ede793827475cbedaa3ead00420902f
| 19,830 |
import re
def parseCsv(file_content):
"""
parseCsv
========
    Parse a CSV string from a Shimadzu analysis, returning a
    dictionary with current, livetime and sample ID
Parameters
----------
file_content : str
shimadzu output csv content
Returns
-------
    dict
        dict with irradiation parameters
"""
irradiation_parameters = {}
irradiation_parameters['sample'] = file_content.split(',')[0].split(':')[1].replace("\"", "").strip()
irradiation_parameters['current'] = re.sub(' +',' ',file_content.split(',')[12]).split(' ')[3]
    irradiation_parameters['current'] = int(re.findall(r'\d+', irradiation_parameters['current'])[0])
irradiation_parameters['livetime'] = int(re.sub(' +',' ',file_content.split(',')[12]).split(' ')[13])
return(irradiation_parameters)
|
cc20a906c23093994ce53358d92453cd4a9ab459
| 19,831 |
def unpackFITS(h5IN, h5archive, overwrite=True):
# ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
""" Package contents of an h5 block to multi-extention FITS files """
"""
MAJOR BUG: This does not like the ExtendLinked HDF5 files one bit... Only
real blocks. No idea why.
(1) Read h5 baby block of symbolic links.
(2) Count number of target blocks.
(3) Begin loop over packaging function.
-- Write a single row of the SAMI Master as a primary HDU.
-- Write each dataset as a FITS extension with corresponding header.
(4) Inform user of FITS file screated, exit successfully.
"""
# Open h5 file.
hdf = h5.File(h5IN, 'r')
# Count number of target blocks.
version = hdf['/SAMI'].keys()[0] # = getVersion(h5IN, hdf, '')
# *** Assuming only one version of data available.
g_version = hdf['/SAMI/'+version]
nTarget = 0
nCalibrator = 0
if 'Target' in g_version.keys():
nTarget = len(g_version['Target'].keys())
gTarget = g_version['Target']
thereAreTargets = True
if 'Calibrator' in g_version.keys():
nCalibrator = len(g_version['Calibrator'].keys())
gCalibrator = g_version['Calibrator']
thereAreCalibrators = True
nGroups = nTarget + nCalibrator
    def plural(nGroups):
        plural = ''
        if nGroups > 1: plural = 's'
        return(plural)
print("Identified "+str(nGroups)+" Target Block"+plural(nGroups)+\
" in '"+h5IN+"'.")
def stripTable(name, version, h5archive):
#master = hdf['/SAMI/'+version+'/Table/SAMI_MASTER']
h5archive = h5.File(h5archive, 'r')
master = h5archive['/SAMI/'+version+'/Table/SAMI_MASTER']
tabline = master[master["CATID"] == int(name)][0]
# For now excluding all strings to make FITS-compatible
# *** BUT HEADER will not know that.
hdu = [v for v in tabline if not isinstance(v, str)]
hdr = makeHead(master)
h5archive.close()
return(hdu, hdr)
# Begin loop over all SAMI targets requested.
# *** CURRENTLY ONLY Targets, not Calibrators. Combine groups in a list?
for thisG in range(nTarget):
# What is the SAMI name of this target?
name = gTarget.keys()[thisG]
# Search for 'Cube' and 'RSS' among Dsets to define output filename
areThereCubes = ['Cube' in s for s in gTarget[name].keys()]
areThereRSS = ['RSS' in s for s in gTarget[name].keys()]
sContents = []
if sum(areThereCubes) > 0: sContents.append('cubes')
if sum(areThereRSS) > 0: sContents.append('RSS')
if len(sContents) > 1: sContents = '_'.join(sContents)
else: sContents = sContents[0]
# Define output filename
fname = 'SAMI_'+name+'_'+sContents+'.fits'
# Primary HDU is a single row of the Master table.
hdu0, hdr0 = stripTable(name, version, h5archive)
hdulist = pf.HDUList([pf.PrimaryHDU(hdu0, header=hdr0)])
# Cycle through all dsets, make HDUs and headers with native names.
# Get number of datasets.
thisTarget = gTarget[name]
nDsets = len(thisTarget.keys())
# Begin loop through all datasets.
for thisDset in range(nDsets):
#for thisDset in range(5):
# Determine dataset.
dsetName = thisTarget.keys()[thisDset]
print("Processing dataset '"+dsetName+"'...")
# Create dataset and populate header.
data = thisTarget[dsetName]
hdr = makeHead(data)
# Add all this to an HDU.
hdulist.append(
pf.ImageHDU(np.array(thisTarget[dsetName]),
name=dsetName,
header=makeHead(data) ) )
# Write to a new FITS file.
hdulist.writeto(fname, clobber=overwrite)
hdf.close()
|
c56b9324df4fcf8d0cf265ed671cdf10ac2bb80d
| 19,832 |
def s_from_v(speed, time=None):
"""
Calculate {distance} from {speed}
The chosen scheme: speed at [i] represents the distance from [i] to [i+1].
This means distance.diff() and time.diff() are shifted by one index from
speed. I have chosen to extrapolate the position at the first index by
assuming we start at a cumulative distance of 0.
Args:
{speed_arg}
{time_arg} Default None.
Returns:
{distance_returns}
"""
if time is None:
time = pd.Series([i for i in range(len(speed))])
# Should this assume the index at position 0 is 0, or should this
    # assume the only NaN is at position 0? Assumptions either way...
return (speed.shift(1) * time.diff()).cumsum().fillna(0)
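# A short worked example (illustrative; pandas is imported here since the
# function assumes it as pd). With a constant speed of 2 units/s sampled once
# per second, the cumulative distance grows by 2 per step, starting at 0.
import pandas as pd
speed = pd.Series([2.0, 2.0, 2.0, 2.0])
assert s_from_v(speed).tolist() == [0.0, 2.0, 4.0, 6.0]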
|
42fa996371af55c97235c2260f1c7c873e7e9e5b
| 19,833 |
def fake_categorize_file(tmpdir_factory):
"""Creates a simple categorize for testing."""
file_name = tmpdir_factory.mktemp("data").join("categorize.nc")
root_grp = netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC")
n_points = 7
root_grp.createDimension('time', n_points)
var = root_grp.createVariable('time', 'f8', 'time')
var[:] = np.arange(n_points)
var = root_grp.createVariable('category_bits', 'i4', 'time')
var[:] = [0, 1, 2, 4, 8, 16, 32]
var = root_grp.createVariable('quality_bits', 'i4', 'time')
var[:] = [0, 1, 2, 4, 8, 16, 32]
root_grp.close()
return file_name
|
17b8e106f19200291bf2a77805542ecf2bb395fe
| 19,834 |
def safe_unsigned_div(a, b, eps=None):
"""Calculates a/b with b >= 0 safely.
    a: A `float` or a tensor of shape `[A1, ..., An]`, which is the numerator.
b: A `float` or a tensor of shape `[A1, ..., An]`, which is the denominator.
eps: A small `float`, to be added to the denominator. If left as `None`, its
value is automatically selected using `b.dtype`.
Raises:
InvalidArgumentError: If tf-graphics debug flag is set and the division
causes `NaN` or `Inf` values.
Returns:
A tensor of shape `[A1, ..., An]` containing the results of division.
"""
if eps is None:
eps = 10.0 * np.finfo(b.dtype).tiny
return a / (b + eps)
|
7795976eab37ae664306c91f724aa163472430b3
| 19,835 |
def load_unpack_npz(path):
"""
Simple helper function to circumvent hardcoding of
keyword arguments for NumPy zip loading and saving.
This assumes that the first entry of the zipped array
contains the keys (in-order) for the rest of the array.
Parameters
----------
path : string
Path to load the NumPy zip file
Returns
----------
data : dict
Unpacked dictionary with specified keys inserted
"""
# Load the NumPy zip file at the path
data = dict(np.load(path, allow_pickle=True))
# Extract the key names stored in the dictionary
keys = data.pop(list(data.keys())[0])
# Obtain the names of the saved keys
old_keys = list(data.keys())
# Re-add all of the entries of the data with the specified keys
for i in range(len(keys)):
data[keys[i]] = data.pop(old_keys[i])
return data
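# A sketch of the matching save convention this loader assumes (illustrative
# file name and keys): the first entry of the zip holds the ordered key names
# for the remaining arrays.
import numpy as np
np.savez('example.npz', keys=np.array(['features', 'labels']),
         arr_a=np.zeros((4, 3)), arr_b=np.arange(4))
unpacked = load_unpack_npz('example.npz')
assert sorted(unpacked.keys()) == ['features', 'labels']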
|
ae5d573a480a87d3c09e8de783d9b94558870c15
| 19,836 |
import re
def is_valid_email(email):
"""
Check if a string is a valid email.
Returns a Boolean.
"""
try:
return re.match(EMAIL_RE, email) is not None
except TypeError:
return False
|
736b3f141e6f3a99644d51c672738d63d64d604e
| 19,837 |
import re
def _create_matcher(utterance):
"""Create a regex that matches the utterance."""
# Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL
# Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name}
parts = re.split(r'({\w+}|\[[\w\s]+\] *)', utterance)
# Pattern to extract name from GROUP part. Matches {name}
group_matcher = re.compile(r'{(\w+)}')
# Pattern to extract text from OPTIONAL part. Matches [the color]
optional_matcher = re.compile(r'\[([\w ]+)\] *')
pattern = ['^']
for part in parts:
group_match = group_matcher.match(part)
optional_match = optional_matcher.match(part)
# Normal part
if group_match is None and optional_match is None:
pattern.append(part)
continue
# Group part
if group_match is not None:
pattern.append(
r'(?P<{}>[\w ]+?)\s*'.format(group_match.groups()[0]))
# Optional part
elif optional_match is not None:
pattern.append(r'(?:{} *)?'.format(optional_match.groups()[0]))
pattern.append('$')
return re.compile(''.join(pattern), re.I)
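# Illustrative usage (not part of the original module), with the utterance
# from the comments above: the optional part may be omitted, and the {name}
# slot comes back as a named group.
matcher = _create_matcher('Change light to [the color] {name}')
assert matcher.match('Change light to red').group('name') == 'red'
assert matcher.match('Change light to the color blue').group('name') == 'blue'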
|
ecf126488f827c65379efc58794136499dfa87dd
| 19,838 |
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
def loadTrainingData(path_to_follow):
"""
It loads the images, assuming that these are in the /IMG/ subfolder.
Also, the output is in the CSV file "steering.csv".
:param path_to_follow: is the full path where the images are placed.
:return: a list with (1) a numpy array with the images in RGB color,
(2) a numpy array with the steering angle, (3) a numpy array
with the class label, and (4) the data logs.
"""
data_path = os.path.join(path_to_follow, "*.csv")
files = glob.glob(data_path)
data_log = pd.read_csv(files[0])
# Check special case of relative paths...
    if len(grep(data_log['path'][0], r"^\s*IMG.+")) > 10:
data_log['path'] = path_to_follow + data_log['path']
dataset = []
for f in data_log['path']:
img = mpimg.imread(f)
img = img.astype('uint8')
dataset.append(img)
del img
dataset = np.array(dataset, dtype="uint8")
labels = np.array(data_log['label'], dtype="uint8")
steering = np.array(data_log['steering'], dtype="float32")
return (dataset, steering, labels, data_log)
|
2e7b4abec48158fb9aa19f17dc219737a56c46f1
| 19,839 |
def top_compartment_air_CO2(setpoints: Setpoints, states: States, weather: Weather):
"""
Equation 2.13 / 8.13
cap_CO2_Top * top_CO2 = mass_CO2_flux_AirTop - mass_CO2_flux_TopOut
"""
cap_CO2_Top = Coefficients.Construction.greenhouse_height - Coefficients.Construction.air_height # Note: line 46 / setDepParams / GreenLight
mass_CO2_flux_AirTop = greenhouse_air_and_above_thermal_screen_CO2_flux(states, setpoints, weather)
mass_CO2_flux_TopOut = above_thermal_screen_and_outdoor_CO2_flux(states, setpoints, weather)
return (mass_CO2_flux_AirTop - mass_CO2_flux_TopOut) / cap_CO2_Top
|
7f294de2f669c2224ccbd09d8200e9b91ddd6ebe
| 19,840 |
def coning_sculling(gyro, accel, order=1):
"""Apply coning and sculling corrections to inertial readings.
The algorithm assumes a polynomial model for the angular velocity and the
specific force, fitting coefficients by considering previous time
intervals. The algorithm for a linear approximation is well known and
described in [1]_ and [2]_.
The accelerometer readings are also corrected for body frame rotation
during a sampling period.
Parameters
----------
gyro : array_like, shape (n_readings, 3)
Gyro readings.
accel : array_like, shape (n_readings, 3)
Accelerometer readings.
order : {0, 1, 2}, optional
Angular velocity and specific force polynomial model order.
Note that 0 means not applying non-commutative corrections at all.
Default is 1.
Returns
-------
theta : ndarray, shape (n_readings, 3)
Estimated rotation vectors.
dv : ndarray, shape (n_readings, 3)
Estimated velocity increments.
References
----------
.. [1] P. G. Savage, "Strapdown Inertial Navigation Integration Algorithm
Design Part 1: Attitude Algorithms", Journal of Guidance, Control,
and Dynamics 1998, Vol. 21, no. 2.
.. [2] P. G. Savage, "Strapdown Inertial Navigation Integration Algorithm
Design Part 2: Velocity and Position Algorithms", Journal of
Guidance, Control, and Dynamics 1998, Vol. 21, no. 2.
"""
if order not in [0, 1, 2]:
raise ValueError("`order` must be 1, 2 or 3.")
gyro = np.asarray(gyro)
accel = np.asarray(accel)
if order == 0:
coning = 0
sculling = 0
elif order == 1:
coning = np.vstack((np.zeros(3), np.cross(gyro[:-1], gyro[1:]) / 12))
sculling = np.vstack((np.zeros(3),
(np.cross(gyro[:-1], accel[1:]) +
np.cross(accel[:-1], gyro[1:])) / 12))
elif order == 2:
coning = (-121 * np.cross(gyro[2:], gyro[1:-1]) +
31 * np.cross(gyro[2:], gyro[:-2]) -
np.cross(gyro[1:-1], gyro[:-2])) / 720
sculling = (-121 * np.cross(gyro[2:], accel[1:-1]) +
31 * np.cross(gyro[2:], accel[:-2]) -
np.cross(gyro[1:-1], accel[:-2]) -
121 * np.cross(accel[2:], gyro[1:-1]) +
31 * np.cross(accel[2:], gyro[:-2]) -
np.cross(accel[1:-1], gyro[:-2])) / 720
coning = np.vstack((np.zeros((2, 3)), coning))
sculling = np.vstack((np.zeros((2, 3)), sculling))
else:
assert False
rc = 0.5 * np.cross(gyro, accel)
return gyro + coning, accel + sculling + rc
|
61f57383488d2d42cb6d63fec5d6d99faa8e2cf2
| 19,841 |
def add():
"""Add a task.
:url: /add/
:returns: job
"""
job = scheduler.add_job(
func=task2,
trigger="interval",
seconds=10,
id="test job 2",
name="test job 2",
replace_existing=True,
)
return "%s added!" % job.name
|
88ef667e05a37ec190ba6e01df23fd617821afe0
| 19,842 |
def magnitude(x: float, y: float, z: float) -> float:
""" Magnitude of x, y, z acceleration √(x²+y²+z²)
Dispatch <float>
Args:
x (float): X-axis of acceleration
y (float): Y-axis of acceleration
z (float): Z-axis of acceleration
Returns:
float: Magnitude of acceleration
Dispatch <pd.DataFrame>
Args:
df (pd.DataFrame): Dataframe containing acceleration columns
xcol (str): X-axis column name, default 'x'
ycol (str): Y-axis column name, default 'y'
zcol (str): Z-axis column name, default 'z'
Returns:
float: Magnitude of acceleration
"""
return np.sqrt(x**2 + y**2 + z**2)
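# Quick numeric check (illustrative; numpy imported here since the function
# assumes it as np), using the 3-4-12 triple: sqrt(9 + 16 + 144) == 13.
import numpy as np
assert magnitude(3.0, 4.0, 12.0) == 13.0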
|
ecb0543b52c385a9b294e87e4b17346b9705f3f8
| 19,843 |
def dauth( bot, input ):
"""Toggle whether channel should be auth enabled by default"""
if not input.admin:
return False
if not input.origin[0] == ID.HON_SC_CHANNEL_MSG:
bot.reply("Run me from channel intended for the default auth!")
else:
cname = bot.id2chan[input.origin[2]]
authed = False
if cname in bot.config.default_auth:
bot.config.set_del( 'default_auth', cname )
else:
bot.config.set_add( 'default_auth', cname )
authed = True
bot.reply( "Default auth in this channel is now " + ( authed and "enabled" or "disabled" ) )
|
a031e1feb45956503c30ddacd40d59a5f65f30ca
| 19,844 |
import json
def write_to_disk(func):
"""
    Decorator used to write the data to disk at each checkpoint, to help us resume the operation.
Args:
func:
Returns:
"""
def wrapper(*args, **kwargs):
func(*args, **kwargs)
with open("checkpoint.json", "r") as f:
f.write(json.dumps(args[0]))
return wrapper
|
d3614b7b75adf40021c31263fbbcdfdda025d1a3
| 19,845 |
import shlex
import os
import stat
def _get_partition(device, uuid):
"""Find the partition of a given device."""
LOG.debug("Find the partition %(uuid)s on device %(dev)s",
{'dev': device, 'uuid': uuid})
try:
_rescan_device(device)
lsblk = utils.execute('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE', device)
report = lsblk[0]
for line in report.split('\n'):
part = {}
# Split into KEY=VAL pairs
vals = shlex.split(line)
for key, val in (v.split('=', 1) for v in vals):
part[key] = val.strip()
# Ignore non partition
if part.get('TYPE') not in ['md', 'part']:
# NOTE(TheJulia): This technically creates an edge failure
# case where a filesystem on a whole block device sans
# partitioning would behave differently.
continue
if part.get('UUID') == uuid:
LOG.debug("Partition %(uuid)s found on device "
"%(dev)s", {'uuid': uuid, 'dev': device})
return '/dev/' + part.get('KNAME')
if part.get('PARTUUID') == uuid:
LOG.debug("Partition %(uuid)s found on device "
"%(dev)s", {'uuid': uuid, 'dev': device})
return '/dev/' + part.get('KNAME')
else:
# NOTE(TheJulia): We may want to consider moving towards using
# findfs in the future, if we're comfortable with the execution
# and interaction. There is value in either way though.
# NOTE(rg): alternative: blkid -l -t UUID=/PARTUUID=
try:
findfs, stderr = utils.execute('findfs', 'UUID=%s' % uuid)
return findfs.strip()
except processutils.ProcessExecutionError as e:
LOG.debug('First fallback detection attempt for locating '
'partition via UUID %(uuid)s failed. '
'Error: %(err)s',
{'uuid': uuid,
'err': e})
try:
findfs, stderr = utils.execute(
'findfs', 'PARTUUID=%s' % uuid)
return findfs.strip()
except processutils.ProcessExecutionError as e:
LOG.debug('Secondary fallback detection attempt for '
'locating partition via UUID %(uuid)s failed. '
'Error: %(err)s',
{'uuid': uuid,
'err': e})
# Last fallback: In case we cannot find the partition by UUID
# and the deploy device is an md device, we check if the md
# device has a partition (which we assume to contain the root fs).
if hardware.is_md_device(device):
md_partition = device + 'p1'
if (os.path.exists(md_partition)
and stat.S_ISBLK(os.stat(md_partition).st_mode)):
LOG.debug("Found md device with partition %s",
md_partition)
return md_partition
else:
LOG.debug('Could not find partition %(part)s on md '
'device %(dev)s',
{'part': md_partition,
'dev': device})
# Partition not found, time to escalate.
error_msg = ("No partition with UUID %(uuid)s found on "
"device %(dev)s" % {'uuid': uuid, 'dev': device})
LOG.error(error_msg)
raise errors.DeviceNotFound(error_msg)
except processutils.ProcessExecutionError as e:
error_msg = ('Finding the partition with UUID %(uuid)s on '
'device %(dev)s failed with %(err)s' %
{'uuid': uuid, 'dev': device, 'err': e})
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
|
e0a5fbed8764b8c2c822fd25907368e9e08cecf8
| 19,846 |
def get_hourly_total_exchange_volume_in_period_from_db_trades(tc_db, start_time, end_time):
"""
Get the exchange volume for this exchange in this period from our saved version
of the trade history.
"""
# Watch this query for performance.
results = tc_db.query(
func.hour(EHTrade.timestamp),
func.sum(EHTrade._volume),
)\
.filter(EHTrade.timestamp >= start_time)\
.filter(EHTrade.timestamp < end_time)\
.group_by(func.hour(EHTrade.timestamp))\
.all()
formatted_results = []
for row in results:
hour = row[0]
timestamp = Delorean(start_time, 'UTC').next_hour(hour).datetime
volume = Money(row[1], 'BTC')
formatted_results.append([
timestamp,
volume,
])
formatted_results = sorted(formatted_results, key=lambda r: r[0])
return formatted_results
|
7d46327e8c89d928d7e208d9a00d7b199345e636
| 19,847 |
import typing
import sqlalchemy.sql
def descending(sorting_func: typing.Any) -> typing.Any:
"""
Modify a sorting function to sort in descending order.
:param sorting_func: the original sorting function
:return: the modified sorting function
"""
def modified_sorting_func(current_columns, original_columns, sorting_func=sorting_func):
return sqlalchemy.sql.desc(sorting_func(current_columns, original_columns))
modified_sorting_func.require_original_columns = getattr(sorting_func, 'require_original_columns', False)
return modified_sorting_func
|
30afe7202648950af5d415f3a2da3af5e9ab9d8f
| 19,848 |
def circleColor(renderer, x, y, rad, color):
"""Draws an unfilled circle to the renderer with a given color.
If the rendering color has any transparency, blending will be enabled.
Args:
renderer (:obj:`SDL_Renderer`): The renderer to draw on.
x (int): The X coordinate of the center of the circle.
y (int): The Y coordinate of the center of the circle.
rad (int): The radius (in pixels) of the circle.
color (int): The color to draw with as a 32-bit ``0xRRGGBBAA`` integer
(e.g. ``0xFF0000FF`` for solid red).
Returns:
int: 0 on success, or -1 on failure.
"""
return _funcs["circleColor"](renderer, x, y, rad, color)
|
6b3475f918cfb0867799710e5c53e5eea326a2c2
| 19,849 |
def _get_ref_init_error(dpde, error, **kwargs):
"""
Function that identifies where the continuous gyro begins, initiates and
then carries the static errors during the continuous modes.
"""
temp = [0.0]
for coeff, inc in zip(dpde[1:, 2], error.survey.inc_rad[1:]):
if inc > kwargs['header']['XY Static Gyro']['End Inc']:
temp.append(temp[-1])
else:
temp.append(coeff)
dpde[:, 2] = temp
return dpde
|
45f4072139f007f65872223c624581b7433ea2aa
| 19,850 |
import re
import os
def get_free_hugepages(socket=None):
"""Get the free hugepage totals on the system.
    :param socket: optional socket param to get free hugepages on a specific
        socket. Should be passed as a string.
:returns: hugepage amount as int
"""
hugepage_free_re = re.compile(r'HugePages_Free:\s+(?P<free_hp>\d+)$')
if socket:
if os.path.exists(
'/sys/devices/system/node/node{}/meminfo'.format(socket)):
meminfo_path = '/sys/devices/system/node/node{}/meminfo'.format(
socket)
else:
_LOGGER.info('No hugepage info found for socket %s', socket)
return 0
else:
meminfo_path = '/proc/meminfo'
with open(meminfo_path, 'r') as result_file:
data = result_file.readlines()
for line in data:
match = hugepage_free_re.search(line)
if match:
_LOGGER.info('Hugepages free: %s %s', match.group('free_hp'),
'on socket {}'.format(socket) if socket else '')
return int(match.group('free_hp'))
_LOGGER.info('Could not parse for hugepage size')
return 0
|
81b37320cf70d61b04509c502dd900d2464719d4
| 19,851 |
import subprocess
def umount(path, bg=False):
"""Umount dfuse from a given path"""
if bg:
cmd = ['fusermount3', '-uz', path]
else:
cmd = ['fusermount3', '-u', path]
ret = subprocess.run(cmd, check=False)
print('rc from umount {}'.format(ret.returncode))
return ret.returncode
|
1b21cb10c5bf1a4829ea001218ac2fee80e1aa40
| 19,852 |
def parse_internal_ballot(line):
"""
Parse an internal ballot line (with or without a trailing newline).
This function allows leading and trailing spaces. ValueError is
raised if one of the values does not parse to an integer.
An internal ballot line is a space-delimited string of integers of the
form--
"WEIGHT CHOICE1 CHOICE2 CHOICE3 ...".
"""
ints = parse_integer_line(line)
weight = next(ints)
choices = tuple(ints)
return weight, choices
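# Usage sketch (illustrative). The real module provides parse_integer_line;
# a hypothetical stand-in is defined here only so the example runs.
def parse_integer_line(line):  # hypothetical stand-in
    return (int(tok) for tok in line.split())
weight, choices = parse_internal_ballot(" 2 3 1 4 \n")
assert (weight, choices) == (2, (3, 1, 4))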
|
9433a496f26dd3511ff343686402e941a617f775
| 19,853 |
def get_fratio(*args):
"""
"""
cmtr = get_cmtr(*args)
cme = get_cme(*args)
fratio = cmtr / cme
return fratio
|
2a93d1211929346fc333837b711ed0eb01f34b2b
| 19,854 |
def _select_by_property(peak_properties, pmin, pmax):
"""
Evaluate where the generic property of peaks confirms to an interval.
Parameters
----------
peak_properties : ndarray
An array with properties for each peak.
pmin : None or number or ndarray
Lower interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
pmax : None or number or ndarray
Upper interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
Returns
-------
keep : bool
A boolean mask evaluating to true where `peak_properties` confirms to the
interval.
See Also
--------
find_peaks
Notes
-----
.. versionadded:: 1.1.0
"""
keep = np.ones(peak_properties.size, dtype=bool)
if pmin is not None:
keep &= (pmin <= peak_properties)
if pmax is not None:
keep &= (peak_properties <= pmax)
return keep
|
12fda9525334d8a2a50e1a4785587dbbb0a70f00
| 19,855 |
def from_period_type_name(period_type_name: str) -> PeriodType:
"""
Safely get Period Type from its name.
:param period_type_name: Name of the period type.
:return: Period type enum.
"""
period_type_values = [item.value for item in PeriodType]
if period_type_name.lower() not in period_type_values:
raise AttributeError(f"Non-existent period type {period_type_name}, supported types: {period_type_values}")
else:
return PeriodType(period_type_name.lower())
|
97feb3bd1f18c1752ba4510628411f23ea77acb1
| 19,856 |
import random
import string
def randomInt(length=4, seed=None):
"""
Returns random integer value with provided number of digits
>>> random.seed(0)
>>> randomInt(6)
874254
"""
if seed is not None:
_ = getCurrentThreadData().random
_.seed(seed)
choice = _.choice
else:
choice = random.choice
return int("".join(choice(string.digits if _ != 0 else string.digits.replace('0', '')) for _ in xrange(0, length)))
|
3d37b6410337271c6798cb1b3542189fcdd04226
| 19,857 |
import re
def matchatleastone(text, regexes):
"""Returns a list of strings that match at least one of the regexes."""
finalregex = "|".join(regexes)
result = re.findall(finalregex, text)
return result
|
1e0775413189931fc48a3dc82c23f0ffe28b333e
| 19,858 |
def safe_string_equals(a, b):
""" Near-constant time string comparison.
Used in order to avoid timing attacks on sensitive information such
as secret keys during request verification (`rootLabs`_).
.. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/
"""
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
|
6253b747061dfdc82a533b103009f0ab469a76ef
| 19,859 |
def DFS_complete(g):
"""Perform DFS for entire graph and return forest as a dictionary.
forest maps each vertex v to the edge that was used to discover it.
(Vertices that are roots of a DFS tree are mapped to None.)
:param g: a Graph class object
:type g: Graph
:return: A tuple of dicts summarizing the clusters of the input graph. The second returned value,
that which is of interest in this project, is a dict where a key is a discovery vertex of a cluster
and its corresponding value is the list of vertices in its cluster.
:rtype: tuple
"""
forest = {}
clusters = {}
for u in g.vertices():
if u not in forest:
forest[u] = None # u will be the root of a tree
cluster = [u]
DFS(g, u, forest, cluster)
clusters[u] = cluster
return forest, clusters
|
7cf500d204b70cbcb9cedf33dda42cb2b717e162
| 19,860 |
def get_keys(mapping, *keys):
"""Return the values corresponding to the given keys, in order."""
return (mapping[k] for k in keys)
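# Illustrative usage (not part of the original module): values come back in
# the order the keys are requested; wrap in list() since a generator is returned.
assert list(get_keys({'a': 1, 'b': 2, 'c': 3}, 'c', 'a')) == [3, 1]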
|
e3b8bdbdff47c428e4618bd4ca03c7179b9f4a2b
| 19,861 |
def accents_dewinize(text):
"""Replace Win1252 symbols with ASCII chars or sequences
needed when copying code parts from MS Office, like Word...
From the book "Fluent Python" by Luciano Ramalho (O'Reilly, 2015)
>>> accents_dewinize('“Stupid word • error inside™ ”')
'"Stupid word - error inside(TM) "'
"""
return sanitize.dewinize(text)
|
d34a4cd694b4d713ac7b207680e26d7c79f2b957
| 19,862 |
def spiral_trajectory(base_resolution,
spiral_arms,
field_of_view,
max_grad_ampl,
min_rise_time,
dwell_time,
views=1,
phases=None,
ordering='linear',
angle_range='full',
tiny_number=7,
readout_os=2.0,
gradient_delay=0.0,
larmor_const=42.577478518,
vd_inner_cutoff=1.0,
vd_outer_cutoff=1.0,
vd_outer_density=1.0,
vd_type='linear'):
"""Calculate a spiral trajectory.
Args:
base_resolution: An `int`. The base resolution, or number of pixels in the
readout dimension.
spiral_arms: An `int`. The number of spiral arms that a fully sampled
k-space should be divided into.
field_of_view: A `float`. The field of view, in mm.
max_grad_ampl: A `float`. The maximum allowed gradient amplitude, in mT/m.
min_rise_time: A `float`. The minimum allowed rise time, in us/(mT/m).
dwell_time: A `float`. The digitiser's real dwell time, in us. This does not
include oversampling. The effective dwell time (with oversampling) is
equal to `dwell_time * readout_os`.
views: An `int`. The number of radial views per phase.
phases: An `int`. The number of phases for cine acquisitions. If `None`,
this is assumed to be a non-cine acquisition with no time dimension.
ordering: A `string`. The ordering type. Must be one of: `{'linear',
'golden', 'tiny', 'sorted'}`.
angle_range: A `string`. The range of the rotation angle. Must be one of:
`{'full', 'half'}`. If `angle_range` is `'full'`, the full circle/sphere
is included in the range. If `angle_range` is `'half'`, only a
semicircle/hemisphere is included.
tiny_number: An `int`. The tiny golden angle number. Only used if `ordering`
is `'tiny'` or `'tiny_half'`. Must be >= 2. Defaults to 7.
readout_os: A `float`. The readout oversampling factor. Defaults to 2.0.
gradient_delay: A `float`. The system's gradient delay relative to the ADC,
in us. Defaults to 0.0.
larmor_const: A `float`. The Larmor constant of the imaging nucleus, in
MHz/T. Defaults to 42.577478518 (the Larmor constant of the 1H nucleus).
vd_inner_cutoff: Defines the inner, high-density portion of *k*-space.
Must be between 0.0 and 1.0, where 0.0 is the center of *k*-space and 1.0
is the edge. Between 0.0 and `vd_inner_cutoff`, *k*-space will be sampled
at the Nyquist rate.
vd_outer_cutoff: Defines the outer, low-density portion of *k*-space. Must
be between 0.0 and 1.0, where 0.0 is the center of *k*-space and 1.0 is
the edge. Between `vd_outer_cutoff` and 1.0, *k*-space will be sampled at
a rate `vd_outer_density` times the Nyquist rate.
vd_outer_density: Defines the sampling density in the outer portion of
*k*-space. Must be > 0.0. Higher means more densely sampled. Multiplies
the Nyquist rate: 1.0 means sampling at the Nyquist rate, < 1.0 means
undersampled and > 1.0 means oversampled.
vd_type: Defines the rate of variation of the sampling density the
variable-density portion of *k*-space, i.e., between `vd_inner_cutoff`
and `vd_outer_cutoff`. Must be one of `'linear'`, `'quadratic'` or
`'hanning'`.
Returns:
A `Tensor` of type `float32` and shape `[views, samples, 2]` if `phases` is
`None`, or of shape `[phases, views, samples, 2]` if `phases` is not `None`.
`samples` is equal to `base_resolution * readout_os`. The units are
radians/voxel, ie, values are in the range `[-pi, pi]`.
References:
.. [1] Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A
flexible numerical algorithm and base analytical equations. Magn. Reson.
Med, 71: 278-285. https://doi.org/10.1002/mrm.24675
"""
return _kspace_trajectory('spiral',
{'base_resolution': base_resolution,
'spiral_arms': spiral_arms,
'field_of_view': field_of_view,
'max_grad_ampl': max_grad_ampl,
'min_rise_time': min_rise_time,
'dwell_time': dwell_time,
'readout_os': readout_os,
'gradient_delay': gradient_delay,
'larmor_const': larmor_const,
'vd_inner_cutoff': vd_inner_cutoff,
'vd_outer_cutoff': vd_outer_cutoff,
'vd_outer_density': vd_outer_density,
'vd_type': vd_type},
views=views,
phases=phases,
ordering=ordering,
angle_range=angle_range,
tiny_number=tiny_number)
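
# Added usage sketch (illustrative; parameter values are arbitrary and assume the
# module's `_kspace_trajectory` backend used above is available): a single-phase
# spiral with 8 arms over a 240 mm field of view.
traj = spiral_trajectory(base_resolution=128,
                         spiral_arms=8,
                         field_of_view=240.0,
                         max_grad_ampl=24.0,
                         min_rise_time=8.125,
                         dwell_time=1.4,
                         views=8)
# Per the docstring, traj has shape [views, base_resolution * readout_os, 2],
# i.e. [8, 256, 2] here, with values in radians/voxel.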
|
b7acf7a14835e63e1f2524a5ac7dd67d16a4eba7
| 19,863 |
def total_examples(X):
"""Counts the total number of examples of a sharded and sliced data object X."""
count = 0
for i in range(len(X)):
for j in range(len(X[i])):
count += len(X[i][j])
return count
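
# Added usage sketch (illustrative, not part of the original snippet): X sharded
# into 2 shards with 2 slices each, holding 10 examples in total.
X = [[[1, 2, 3], [4, 5]], [[6], [7, 8, 9, 10]]]
assert total_examples(X) == 10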
|
faf42a940e4413405d97610858e13496eb848eae
| 19,864 |
def convert_and_save(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras, and automatic save
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
If a progress bar and some messages should appear
-ignore_tests (bool):
If tests should be ignored.
            If set to True, the converted model will still be tested as a
            safety check; if the models are not identical, only a warning is
            printed.
            If set to False and the models are not identical, a RuntimeWarning
            will be raised.
            If weights is False, tests are automatically ignored.
-input_range:
            Optional.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Returns:
Name of created hdf5 file
"""
return convert(model=model,
input_shape=input_shape,
weights=weights,
quiet=quiet,
ignore_tests=ignore_tests,
input_range=input_range,
save=True,
filename=filename,
directory=directory)
|
00305f6d9a163b61a963e04e810a8d3808403d23
| 19,865 |
def no_afni():
""" Checks if AFNI is available """
if Info.version() is None:
return True
return False
|
fc10292bc69ca5996a76227c3bbbd5855eb2520e
| 19,866 |
def delta_eta_plot_projection_range_string(inclusive_analysis: "correlations.Correlations") -> str:
""" Provides a string that describes the delta phi projection range for delta eta plots. """
# The limit is almost certainly a multiple of pi, so we try to express it more naturally
# as a value like pi/2 or 3*pi/2
value = _find_pi_coefficient(value = inclusive_analysis.near_side_phi_region.max)
return labels.make_valid_latex_string(
fr"$|\Delta\varphi|<{value}$"
)
|
3b36d65a3223ca2989ad9607fc2b15b298b1c709
| 19,867 |
import subprocess
def select_gpu(gpu_ids=None, gpu_mem_frac=None):
"""
Find the GPU ID with highest available memory fraction.
If ID is given as input, set the gpu_mem_frac to maximum available,
or if a memory fraction is given, make sure the given GPU has the desired
memory fraction available.
Currently only supports single GPU runs.
:param int gpu_ids: Desired GPU ID. If None, find GPU with the most memory
available.
:param float gpu_mem_frac: Desired GPU memory fraction [0, 1]. If None,
use maximum available amount of GPU.
:return int gpu_ids: GPU ID to use.
:return float cur_mem_frac: GPU memory fraction to use
:raises NotImplementedError: If gpu_ids is not int
:raises AssertionError: If requested memory fraction isn't available
"""
# Check if user has specified a GPU ID to use
if not isinstance(gpu_ids, type(None)):
# Currently only supporting one GPU as input
if not isinstance(gpu_ids, int):
raise NotImplementedError
if gpu_ids == -1:
return -1, 0
cur_mem_frac = check_gpu_availability(gpu_ids)
if not isinstance(gpu_mem_frac, type(None)):
if isinstance(gpu_mem_frac, float):
gpu_mem_frac = [gpu_mem_frac]
            assert np.all(np.array(cur_mem_frac) >= np.array(gpu_mem_frac)), \
("Not enough memory available. Requested/current fractions:",
"\n".join([str(c) + " / " + "{0:.4g}".format(m)
for c, m in zip(gpu_mem_frac, cur_mem_frac)]))
return gpu_ids, cur_mem_frac[0]
# User has not specified GPU ID, find the GPU with most memory available
sp = subprocess.Popen(['nvidia-smi --query-gpu=index --format=csv'],
stdout=subprocess.PIPE,
shell=True)
gpu_ids = sp.communicate()
gpu_ids = gpu_ids[0].decode('utf8')
gpu_ids = gpu_ids.split('\n')
# If no GPUs are found, run on CPU (debug mode)
if len(gpu_ids) <= 2:
print('No GPUs found, run will be slow. Query result: {}'.format(gpu_ids))
return -1, 0
gpu_ids = [int(gpu_id) for gpu_id in gpu_ids[1:-1]]
cur_mem_frac = check_gpu_availability(gpu_ids)
# Get the GPU with maximum memory fraction
max_mem = max(cur_mem_frac)
idx = cur_mem_frac.index(max_mem)
gpu_id = gpu_ids[idx]
# Subtract a little margin to be safe
max_mem = max_mem - np.finfo(np.float32).eps
print('Using GPU {} with memory fraction {}.'.format(gpu_id, max_mem))
return gpu_id, max_mem
|
cc903bc4adbf63108ce473efdde50d68dd622c6e
| 19,868 |
import torch
import random
def load_classification_dataset(
fname,
tokenizer,
input_field_a,
input_field_b=None,
label_field='label',
label_map=None,
limit=None
):
"""
Loads a dataset for classification
Parameters
==========
tokenizer : transformers.PretrainedTokenizer
Maps text to id tensors.
    input_field_a : str
        Name of the instance field whose text is encoded as the (first) input.
"""
instances = []
label_map = label_map or {}
loader = LOADERS[fname.suffix]
for instance in loader(fname):
logger.debug(instance)
model_inputs = tokenizer.encode_plus(
instance[input_field_a],
instance[input_field_b] if input_field_b else None,
add_special_tokens=True,
# add_prefix_space=True,
return_tensors='pt'
)
logger.debug(model_inputs)
label = instance[label_field]
if label not in label_map:
label_map[label] = len(label_map)
label_id = label_map[label]
        label_id = torch.tensor([[label_id]])  # To match the collator's expected shape
logger.debug(f'Label id: {label_id}')
instances.append((model_inputs, label_id))
if limit:
instances = random.sample(instances, limit)
return instances, label_map
|
6099426c0a9f5246400e8878715335e0804ee90a
| 19,869 |
import os
import lzma
import dill
def test_mnist(args):
""" Calculates error rate on MNIST for a previously saved MulticlassClassifier instance.
Parameters
----------
args : argparse.Namespace
Arguments from command line.
Returns
-------
error_rate : float
The calculated error rate.
Raises
------
FileNotFound
If unable to locate multicc_mnist.dill.xz
"""
# Arguments from the command line.
process_count = args.process_count
fraction_of_mnist = args.fraction_of_mnist
# Load trained MulticlassClassifier.
print('Loading MulticlassClassifier')
load_filepath = 'multicc_mnist.dill.xz'
if os.path.isfile(load_filepath):
with lzma.open(load_filepath, 'rb') as multicc_file:
multicc = dill.load(multicc_file)
else:
raise FileNotFoundError('Unable to locate ' + load_filepath)
# Set other binary classifier predict arguments.
# We set option so the voted perceptron predict method returns a real-valued confidence score
# as opposed to a label. This is required by MulticlassClassifier.
other_bc_predict_args = tuple(['score'])
other_bc_predict_kwargs = {}
# Get data and labels for testing.
print('Loading MNIST data')
data, labels = get_data_and_labels('test', fraction_of_mnist)
# Calculate error rate on test data.
print('Computing error rate')
error_rate = multicc.error_rate(data, labels,
other_bc_predict_args, other_bc_predict_kwargs,
process_count)
return error_rate
|
c40cd781c6baefbdf7fd3d524badc10df0e73985
| 19,870 |
def has_content_in(page, language):
"""Fitler that return ``True`` if the page has any content in a
particular language.
:param page: the current page
:param language: the language you want to look at
"""
if page is None:
return False
return Content.objects.filter(page=page, language=language).count() > 0
|
6207583ad110aa098b5f556ad7a13b1b5218a1d3
| 19,871 |
def public_encrypt(key, data, oaep):
"""
public key encryption using rsa with pkcs1-oaep padding.
returns the base64-encoded encrypted data
data: the data to be encrypted, bytes
key: pem-formatted key string or bytes
oaep: whether to use oaep padding or not
"""
if isinstance(key, str):
key = key.encode("ascii")
pubkey = load_public_key(key)
if oaep:
encrypted = rsa_oaep_encrypt(pubkey, data)
else:
encrypted = rsa_pkcs1v15_encrypt(pubkey, data)
return b64encode(encrypted).decode("ascii")
|
7310a0d408deff30efad2c961518261187a89dbf
| 19,872 |
def newton_method(f, x_init = 0, epsilon = 1e-10):
"""
Newton Raphson Optimizer
...
Parameters
---
f: Function to calculate root for
x_init(optional) : initial value of x
epsilon(optional): Adjustable precision
Returns
---
x: Value of root
"""
prev_value = x_init + 2 * epsilon
value = x_init
iterations = 0
while abs(prev_value - value) > epsilon:
prev_value = value
f_dash = derivative(f, value)
value = value - f(value) / f_dash
iterations += 1
print(f"Newton Method converged in {iterations} iterations")
return value
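
# Added usage sketch (illustrative; assumes the `derivative` helper used above is
# available, e.g. a numerical first-derivative routine): root of x**2 - 2 near 1.
root = newton_method(lambda x: x ** 2 - 2, x_init=1.0)
# root is approximately 1.41421356 (sqrt(2))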
|
5801d5f908e30551c321eaf0ec8dfbf42869e005
| 19,873 |
def create_preference_branch(this, args, callee):
"""Creates a preference branch, which can be used for testing composed
preference names."""
if args:
if args[0].is_literal:
res = this.traverser.wrap().query_interface('nsIPrefBranch')
res.hooks['preference_branch'] = args[0].as_str()
return res
|
6e6cc013b9d6c645a6a94087fe63b3a186582003
| 19,874 |
def circle(
gdf,
radius=10,
fill=True,
fill_color=None,
name="layer",
width=950,
height=550,
location=None,
color="blue",
tooltip=None,
zoom=7,
tiles="OpenStreetMap",
attr=None,
style={},
):
"""
Convert Geodataframe to geojson and plot it.
Parameters
----------
gdf : GeoDataframe
radius: radius of the circle
fill: fill the circle
fill_color: fill the circle with this color (column name or color)
name : name of the geojson layer, optional, default "layer"
width : width of the map, default 950
height : height of the map, default 550
location : center of the map rendered, default centroid of first geometry
color : color of your geometries, default blue
use random to randomize the colors (column name or color)
tooltip : hover box on the map with geometry info, default all columns
can be a list of column names
zoom : zoom level of the map, default 7
tiles : basemap, default openstreetmap,
options ['google','googlesatellite','googlehybrid'] or custom wms
attr : Attribution to external basemaps being used, default None
style : dict, additional style to geometries
Returns
-------
m : folium.map
"""
gpd_copy = _get_lat_lon(gdf.copy())
m = _folium_map(
gpd_copy, width, height, location, tiles=tiles, attr=attr, zoom_start=zoom
)
for index, row in gpd_copy.iterrows():
if tooltip is not None:
tooltip_dict = {k: v for k, v in dict(row).items() if k in tooltip}
tooltip = "".join(
[
"<p><b>{}</b> {}</p>".format(keyvalue[0], keyvalue[1])
for keyvalue in list(tooltip_dict.items())
]
)
else:
tooltip = _get_tooltip(tooltip, gdf)
if fill_color in list(gpd_copy.columns):
fill_color = row[fill_color]
if color in list(gpd_copy.columns):
color = row[color]
folium.Circle(
radius=radius,
location=[row["latitude"], row["longitude"]],
tooltip=tooltip,
popup=tooltip,
fill=fill,
color=color,
fill_color=fill_color,
).add_to(m)
return m
|
d2f2e066c2f6988f950ffce6a7655b60c91c3cec
| 19,875 |
import traceback
def no_recurse(f):
"""Wrapper function that forces a function to return True if it recurse."""
def func(*args, **kwargs):
for i in traceback.extract_stack():
if i[2] == f.__name__:
return True
return f(*args, **kwargs)
return func
|
cce02b5e8fff125040e457c66c7cc9c344e209cb
| 19,876 |
from typing import List
import pandas
from typing import Tuple
def get_tap_number(distSys: SystemClass, names: List[str]) -> pandas.DataFrame:
"""
Get the tap number of regulators.
Args:
distSys : An instance of [SystemClass][dssdata.SystemClass].
names : Regulators names
Returns:
The tap number of regulators.
"""
def get_one(reg_name: str) -> Tuple[str, int]:
distSys.dss.RegControls.Name(reg_name)
return (reg_name, int(distSys.dss.RegControls.TapNumber()))
__check_elements(names, distSys.dss.RegControls.AllNames())
return pandas.DataFrame(
data=tuple(map(get_one, names)), columns=["reg_name", "tap"]
)
|
a780b151670f261656d938ec48a5ac684c8c9d6d
| 19,877 |
def status(app: str) -> dict:
"""
    :param app: The name of the Heroku app whose status you want to query
:type app: str
:return: dictionary containing information about the app's status
"""
return Herokron(app).status()
|
f5251469c8388edf885ac9e4ae502549f0092703
| 19,878 |
def GetIPv4Interfaces():
"""Returns a list of IPv4 interfaces."""
interfaces = sorted(netifaces.interfaces())
return [x for x in interfaces if not x.startswith('lo')]
|
01fc53160b01e3322af8d18175fde0011d87d127
| 19,879 |
def merge_dicts(source, destination):
"""
Recursively merges two dictionaries source and destination.
The source dictionary will only be read, but the destination dictionary will be overwritten.
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge_dicts(value, node)
else:
destination[key] = value
return destination
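
# Added usage sketch (illustrative, not part of the original snippet): overlaying
# user overrides onto defaults in place.
defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}
merged = merge_dicts(overrides, defaults)
assert merged == {"db": {"host": "localhost", "port": 5433}, "debug": True}
assert merged is defaults  # the destination dict is mutated and returned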
|
dea2d01f2cdf42c38daee8589abcc69a3f82e5c8
| 19,880 |
def create_hive_connection():
"""
    Create a JDBC connection for Hive using the module-level hive_jdbc_url,
    hive_username, hive_password and hive_jar_path settings
    :return: an open jaydebeapi connection
"""
try:
conn = jaydebeapi.connect('org.apache.hive.jdbc.HiveDriver',
hive_jdbc_url,
[hive_username, hive_password],
hive_jar_path,
'')
return conn
except Exception as e:
raise Exception(e)
|
48ac2859c9ceec9129d377f722ff96786a1c9552
| 19,881 |
def main():
"""
The main function to execute upon call.
Returns
-------
int
returns integer 0 for safe executions.
"""
print('Hello World')
return 0
|
077df89bc009a12889afc6567bfd97abdb173411
| 19,882 |
def brightness(image, magnitude, name=None):
"""Adjusts the `magnitude` of brightness of an `image`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
magnitude: A 0-D float tensor or single floating point value above 0.0.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "brightness"):
dark = tf.zeros_like(image)
bright_image = blend(dark, image, magnitude)
return bright_image
|
52e3016dce51bd5435e2c4085aa0a4d50b9c3502
| 19,883 |
def sitemap_xml():
"""Sitemap XML"""
sitemap = render_template("core/sitemap.xml")
return Response(sitemap, mimetype="text/xml")
|
12d954b7f3c88f10e694e0aa1998699322a5602b
| 19,884 |
def get_CM():
"""Pertzの係数CMをndarrayとして取得する
Args:
Returns:
CM(ndarray[float]):Pertzの係数CM
"""
# pythonは0オリジンのため全て-1
CM = [0.385230, 0.385230, 0.385230, 0.462880, 0.317440,#1_1 => 0_0
0.338390, 0.338390, 0.221270, 0.316730, 0.503650,
0.235680, 0.235680, 0.241280, 0.157830, 0.269440,
0.830130, 0.830130, 0.171970, 0.841070, 0.457370,
0.548010, 0.548010, 0.478000, 0.966880, 1.036370,
0.548010, 0.548010, 1.000000, 3.012370, 1.976540,
0.582690, 0.582690, 0.229720, 0.892710, 0.569950,
0.131280, 0.131280, 0.385460, 0.511070, 0.127940,#1_2 => 0_1
0.223710, 0.223710, 0.193560, 0.304560, 0.193940,
0.229970, 0.229970, 0.275020, 0.312730, 0.244610,
0.090100, 0.184580, 0.260500, 0.687480, 0.579440,
0.131530, 0.131530, 0.370190, 1.380350, 1.052270,
1.116250, 1.116250, 0.928030, 3.525490, 2.316920,
0.090100, 0.237000, 0.300040, 0.812470, 0.664970,
0.587510, 0.130000, 0.400000, 0.537210, 0.832490,#1_3 => 0_2
0.306210, 0.129830, 0.204460, 0.500000, 0.681640,
0.224020, 0.260620, 0.334080, 0.501040, 0.350470,
0.421540, 0.753970, 0.750660, 3.706840, 0.983790,
0.706680, 0.373530, 1.245670, 0.864860, 1.992630,
4.864400, 0.117390, 0.265180, 0.359180, 3.310820,
0.392080, 0.493290, 0.651560, 1.932780, 0.898730,
0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_4 => 0_3
0.810820, 0.810820, 0.810820, 0.810820, 0.810820,
3.241680, 2.500000, 2.291440, 2.291440, 2.291440,
4.000000, 3.000000, 2.000000, 0.975430, 1.965570,
12.494170, 12.494170, 8.000000, 5.083520, 8.792390,
21.744240, 21.744240, 21.744240, 21.744240, 21.744240,
3.241680, 12.494170, 1.620760, 1.375250, 2.331620,
0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_5 => 0_4
0.810820, 0.810820, 0.810820, 0.810820, 0.810820,
3.241680, 2.500000, 2.291440, 2.291440, 2.291440,
4.000000, 3.000000, 2.000000, 0.975430, 1.965570,
12.494170, 12.494170, 8.000000, 5.083520, 8.792390,
21.744240, 21.744240, 21.744240, 21.744240, 21.744240,
3.241680, 12.494170, 1.620760, 1.375250, 2.331620,
0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_6 => 0_5
0.810820, 0.810820, 0.810820, 0.810820, 0.810820,
3.241680, 2.500000, 2.291440, 2.291440, 2.291440,
4.000000, 3.000000, 2.000000, 0.975430, 1.965570,
12.494170, 12.494170, 8.000000, 5.083520, 8.792390,
21.744240, 21.744240, 21.744240, 21.744240, 21.744240,
3.241680, 12.494170, 1.620760, 1.375250, 2.331620,
0.337440, 0.337440, 0.969110, 1.097190, 1.116080,#2_1 => 1_0
0.337440, 0.337440, 0.969110, 1.116030, 0.623900,
0.337440, 0.337440, 1.530590, 1.024420, 0.908480,
0.584040, 0.584040, 0.847250, 0.914940, 1.289300,
0.337440, 0.337440, 0.310240, 1.435020, 1.852830,
0.337440, 0.337440, 1.015010, 1.097190, 2.117230,
0.337440, 0.337440, 0.969110, 1.145730, 1.476400,
0.300000, 0.300000, 0.700000, 1.100000, 0.796940,#2_2 => 1_1
0.219870, 0.219870, 0.526530, 0.809610, 0.649300,
0.386650, 0.386650, 0.119320, 0.576120, 0.685460,
0.746730, 0.399830, 0.470970, 0.986530, 0.785370,
0.575420, 0.936700, 1.649200, 1.495840, 1.335590,
1.319670, 4.002570, 1.276390, 2.644550, 2.518670,
0.665190, 0.678910, 1.012360, 1.199940, 0.986580,
0.378870, 0.974060, 0.500000, 0.491880, 0.665290,#2_3 => 1_2
0.105210, 0.263470, 0.407040, 0.553460, 0.582590,
0.312900, 0.345240, 1.144180, 0.854790, 0.612280,
0.119070, 0.365120, 0.560520, 0.793720, 0.802600,
0.781610, 0.837390, 1.270420, 1.537980, 1.292950,
1.152290, 1.152290, 1.492080, 1.245370, 2.177100,
0.424660, 0.529550, 0.966910, 1.033460, 0.958730,
0.310590, 0.714410, 0.252450, 0.500000, 0.607600,#2_4 => 1_3
0.975190, 0.363420, 0.500000, 0.400000, 0.502800,
0.175580, 0.196250, 0.476360, 1.072470, 0.490510,
0.719280, 0.698620, 0.657770, 1.190840, 0.681110,
0.426240, 1.464840, 0.678550, 1.157730, 0.978430,
2.501120, 1.789130, 1.387090, 2.394180, 2.394180,
0.491640, 0.677610, 0.685610, 1.082400, 0.735410,
0.597000, 0.500000, 0.300000, 0.310050, 0.413510,#2_5 => 1_4
0.314790, 0.336310, 0.400000, 0.400000, 0.442460,
0.166510, 0.460440, 0.552570, 1.000000, 0.461610,
0.401020, 0.559110, 0.403630, 1.016710, 0.671490,
0.400360, 0.750830, 0.842640, 1.802600, 1.023830,
3.315300, 1.510380, 2.443650, 1.638820, 2.133990,
0.530790, 0.745850, 0.693050, 1.458040, 0.804500,
0.597000, 0.500000, 0.300000, 0.310050, 0.800920,#2_6 => 1_5
0.314790, 0.336310, 0.400000, 0.400000, 0.237040,
0.166510, 0.460440, 0.552570, 1.000000, 0.581990,
0.401020, 0.559110, 0.403630, 1.016710, 0.898570,
0.400360, 0.750830, 0.842640, 1.802600, 3.400390,
3.315300, 1.510380, 2.443650, 1.638820, 2.508780,
0.204340, 1.157740, 2.003080, 2.622080, 1.409380,
1.242210, 1.242210, 1.242210, 1.242210, 1.242210,#3_1 => 2_0
0.056980, 0.056980, 0.656990, 0.656990, 0.925160,
0.089090, 0.089090, 1.040430, 1.232480, 1.205300,
1.053850, 1.053850, 1.399690, 1.084640, 1.233340,
1.151540, 1.151540, 1.118290, 1.531640, 1.411840,
1.494980, 1.494980, 1.700000, 1.800810, 1.671600,
1.018450, 1.018450, 1.153600, 1.321890, 1.294670,
0.700000, 0.700000, 1.023460, 0.700000, 0.945830,#3_2 => 2_1
0.886300, 0.886300, 1.333620, 0.800000, 1.066620,
0.902180, 0.902180, 0.954330, 1.126690, 1.097310,
1.095300, 1.075060, 1.176490, 1.139470, 1.096110,
1.201660, 1.201660, 1.438200, 1.256280, 1.198060,
1.525850, 1.525850, 1.869160, 1.985410, 1.911590,
1.288220, 1.082810, 1.286370, 1.166170, 1.119330,
0.600000, 1.029910, 0.859890, 0.550000, 0.813600,#3_3 => 2_2
0.604450, 1.029910, 0.859890, 0.656700, 0.928840,
0.455850, 0.750580, 0.804930, 0.823000, 0.911000,
0.526580, 0.932310, 0.908620, 0.983520, 0.988090,
1.036110, 1.100690, 0.848380, 1.035270, 1.042380,
1.048440, 1.652720, 0.900000, 2.350410, 1.082950,
0.817410, 0.976160, 0.861300, 0.974780, 1.004580,
0.782110, 0.564280, 0.600000, 0.600000, 0.665740,#3_4 => 2_3
0.894480, 0.680730, 0.541990, 0.800000, 0.669140,
0.487460, 0.818950, 0.841830, 0.872540, 0.709040,
0.709310, 0.872780, 0.908480, 0.953290, 0.844350,
0.863920, 0.947770, 0.876220, 1.078750, 0.936910,
1.280350, 0.866720, 0.769790, 1.078750, 0.975130,
0.725420, 0.869970, 0.868810, 0.951190, 0.829220,
0.791750, 0.654040, 0.483170, 0.409000, 0.597180,#3_5 => 2_4
0.566140, 0.948990, 0.971820, 0.653570, 0.718550,
0.648710, 0.637730, 0.870510, 0.860600, 0.694300,
0.637630, 0.767610, 0.925670, 0.990310, 0.847670,
0.736380, 0.946060, 1.117590, 1.029340, 0.947020,
1.180970, 0.850000, 1.050000, 0.950000, 0.888580,
0.700560, 0.801440, 0.961970, 0.906140, 0.823880,
0.500000, 0.500000, 0.586770, 0.470550, 0.629790,#3_6 => 2_5
0.500000, 0.500000, 1.056220, 1.260140, 0.658140,
0.500000, 0.500000, 0.631830, 0.842620, 0.582780,
0.554710, 0.734730, 0.985820, 0.915640, 0.898260,
0.712510, 1.205990, 0.909510, 1.078260, 0.885610,
1.899260, 1.559710, 1.000000, 1.150000, 1.120390,
0.653880, 0.793120, 0.903320, 0.944070, 0.796130,
1.000000, 1.000000, 1.050000, 1.170380, 1.178090,#4_1 => 3_0
0.960580, 0.960580, 1.059530, 1.179030, 1.131690,
0.871470, 0.871470, 0.995860, 1.141910, 1.114600,
1.201590, 1.201590, 0.993610, 1.109380, 1.126320,
1.065010, 1.065010, 0.828660, 0.939970, 1.017930,
1.065010, 1.065010, 0.623690, 1.119620, 1.132260,
1.071570, 1.071570, 0.958070, 1.114130, 1.127110,
0.950000, 0.973390, 0.852520, 1.092200, 1.096590,#4_2 => 3_1
0.804120, 0.913870, 0.980990, 1.094580, 1.042420,
0.737540, 0.935970, 0.999940, 1.056490, 1.050060,
1.032980, 1.034540, 0.968460, 1.032080, 1.015780,
0.900000, 0.977210, 0.945960, 1.008840, 0.969960,
0.600000, 0.750000, 0.750000, 0.844710, 0.899100,
0.926800, 0.965030, 0.968520, 1.044910, 1.032310,
0.850000, 1.029710, 0.961100, 1.055670, 1.009700,#4_3 => 3_2
0.818530, 0.960010, 0.996450, 1.081970, 1.036470,
0.765380, 0.953500, 0.948260, 1.052110, 1.000140,
0.775610, 0.909610, 0.927800, 0.987800, 0.952100,
1.000990, 0.881880, 0.875950, 0.949100, 0.893690,
0.902370, 0.875960, 0.807990, 0.942410, 0.917920,
0.856580, 0.928270, 0.946820, 1.032260, 0.972990,
0.750000, 0.857930, 0.983800, 1.056540, 0.980240,#4_4 => 3_3
0.750000, 0.987010, 1.013730, 1.133780, 1.038250,
0.800000, 0.947380, 1.012380, 1.091270, 0.999840,
0.800000, 0.914550, 0.908570, 0.999190, 0.915230,
0.778540, 0.800590, 0.799070, 0.902180, 0.851560,
0.680190, 0.317410, 0.507680, 0.388910, 0.646710,
0.794920, 0.912780, 0.960830, 1.057110, 0.947950,
0.750000, 0.833890, 0.867530, 1.059890, 0.932840,#4_5 => 3_4
0.979700, 0.971470, 0.995510, 1.068490, 1.030150,
0.858850, 0.987920, 1.043220, 1.108700, 1.044900,
0.802400, 0.955110, 0.911660, 1.045070, 0.944470,
0.884890, 0.766210, 0.885390, 0.859070, 0.818190,
0.615680, 0.700000, 0.850000, 0.624620, 0.669300,
0.835570, 0.946150, 0.977090, 1.049350, 0.979970,
0.689220, 0.809600, 0.900000, 0.789500, 0.853990,#4_6 => 3_5
0.854660, 0.852840, 0.938200, 0.923110, 0.955010,
0.938600, 0.932980, 1.010390, 1.043950, 1.041640,
0.843620, 0.981300, 0.951590, 0.946100, 0.966330,
0.694740, 0.814690, 0.572650, 0.400000, 0.726830,
0.211370, 0.671780, 0.416340, 0.297290, 0.498050,
0.843540, 0.882330, 0.911760, 0.898420, 0.960210,
1.054880, 1.075210, 1.068460, 1.153370, 1.069220,#5_1 => 4_0
1.000000, 1.062220, 1.013470, 1.088170, 1.046200,
0.885090, 0.993530, 0.942590, 1.054990, 1.012740,
0.920000, 0.950000, 0.978720, 1.020280, 0.984440,
0.850000, 0.908500, 0.839940, 0.985570, 0.962180,
0.800000, 0.800000, 0.810080, 0.950000, 0.961550,
1.038590, 1.063200, 1.034440, 1.112780, 1.037800,
1.017610, 1.028360, 1.058960, 1.133180, 1.045620,#5_2 => 4_1
0.920000, 0.998970, 1.033590, 1.089030, 1.022060,
0.912370, 0.949930, 0.979770, 1.020420, 0.981770,
0.847160, 0.935300, 0.930540, 0.955050, 0.946560,
0.880260, 0.867110, 0.874130, 0.972650, 0.883420,
0.627150, 0.627150, 0.700000, 0.774070, 0.845130,
0.973700, 1.006240, 1.026190, 1.071960, 1.017240,
1.028710, 1.017570, 1.025900, 1.081790, 1.024240,#5_3 => 4_2
0.924980, 0.985500, 1.014100, 1.092210, 0.999610,
0.828570, 0.934920, 0.994950, 1.024590, 0.949710,
0.900810, 0.901330, 0.928830, 0.979570, 0.913100,
0.761030, 0.845150, 0.805360, 0.936790, 0.853460,
0.626400, 0.546750, 0.730500, 0.850000, 0.689050,
0.957630, 0.985480, 0.991790, 1.050220, 0.987900,
0.992730, 0.993880, 1.017150, 1.059120, 1.017450,#5_4 => 4_3
0.975610, 0.987160, 1.026820, 1.075440, 1.007250,
0.871090, 0.933190, 0.974690, 0.979840, 0.952730,
0.828750, 0.868090, 0.834920, 0.905510, 0.871530,
0.781540, 0.782470, 0.767910, 0.764140, 0.795890,
0.743460, 0.693390, 0.514870, 0.630150, 0.715660,
0.934760, 0.957870, 0.959640, 0.972510, 0.981640,
0.965840, 0.941240, 0.987100, 1.022540, 1.011160,#5_5 => 4_4
0.988630, 0.994770, 0.976590, 0.950000, 1.034840,
0.958200, 1.018080, 0.974480, 0.920000, 0.989870,
0.811720, 0.869090, 0.812020, 0.850000, 0.821050,
0.682030, 0.679480, 0.632450, 0.746580, 0.738550,
0.668290, 0.445860, 0.500000, 0.678920, 0.696510,
0.926940, 0.953350, 0.959050, 0.876210, 0.991490,
0.948940, 0.997760, 0.850000, 0.826520, 0.998470,#5_6 => 4_5
1.017860, 0.970000, 0.850000, 0.700000, 0.988560,
1.000000, 0.950000, 0.850000, 0.606240, 0.947260,
1.000000, 0.746140, 0.751740, 0.598390, 0.725230,
0.922210, 0.500000, 0.376800, 0.517110, 0.548630,
0.500000, 0.450000, 0.429970, 0.404490, 0.539940,
0.960430, 0.881630, 0.775640, 0.596350, 0.937680,
1.030000, 1.040000, 1.000000, 1.000000, 1.049510,#6_1 => 5_0
1.050000, 0.990000, 0.990000, 0.950000, 0.996530,
1.050000, 0.990000, 0.990000, 0.820000, 0.971940,
1.050000, 0.790000, 0.880000, 0.820000, 0.951840,
1.000000, 0.530000, 0.440000, 0.710000, 0.928730,
0.540000, 0.470000, 0.500000, 0.550000, 0.773950,
1.038270, 0.920180, 0.910930, 0.821140, 1.034560,
1.041020, 0.997520, 0.961600, 1.000000, 1.035780,#6_2 => 5_1
0.948030, 0.980000, 0.900000, 0.950360, 0.977460,
0.950000, 0.977250, 0.869270, 0.800000, 0.951680,
0.951870, 0.850000, 0.748770, 0.700000, 0.883850,
0.900000, 0.823190, 0.727450, 0.600000, 0.839870,
0.850000, 0.805020, 0.692310, 0.500000, 0.788410,
1.010090, 0.895270, 0.773030, 0.816280, 1.011680,
1.022450, 1.004600, 0.983650, 1.000000, 1.032940,#6_3 => 5_2
0.943960, 0.999240, 0.983920, 0.905990, 0.978150,
0.936240, 0.946480, 0.850000, 0.850000, 0.930320,
0.816420, 0.885000, 0.644950, 0.817650, 0.865310,
0.742960, 0.765690, 0.561520, 0.700000, 0.827140,
0.643870, 0.596710, 0.474460, 0.600000, 0.651200,
0.971740, 0.940560, 0.714880, 0.864380, 1.001650,
0.995260, 0.977010, 1.000000, 1.000000, 1.035250,#6_4 => 5_3
0.939810, 0.975250, 0.939980, 0.950000, 0.982550,
0.876870, 0.879440, 0.850000, 0.900000, 0.917810,
0.873480, 0.873450, 0.751470, 0.850000, 0.863040,
0.761470, 0.702360, 0.638770, 0.750000, 0.783120,
0.734080, 0.650000, 0.600000, 0.650000, 0.715660,
0.942160, 0.919100, 0.770340, 0.731170, 0.995180,
0.952560, 0.916780, 0.920000, 0.900000, 1.005880,#6_5 => 5_4
0.928620, 0.994420, 0.900000, 0.900000, 0.983720,
0.913070, 0.850000, 0.850000, 0.800000, 0.924280,
0.868090, 0.807170, 0.823550, 0.600000, 0.844520,
0.769570, 0.719870, 0.650000, 0.550000, 0.733500,
0.580250, 0.650000, 0.600000, 0.500000, 0.628850,
0.904770, 0.852650, 0.708370, 0.493730, 0.949030,
0.911970, 0.800000, 0.800000, 0.800000, 0.956320,#6_6 => 5_5
0.912620, 0.682610, 0.750000, 0.700000, 0.950110,
0.653450, 0.659330, 0.700000, 0.600000, 0.856110,
0.648440, 0.600000, 0.641120, 0.500000, 0.695780,
0.570000, 0.550000, 0.598800, 0.40000 , 0.560150,
0.475230, 0.500000, 0.518640, 0.339970, 0.520230,
0.743440, 0.592190, 0.603060, 0.316930, 0.794390 ]
return np.array(CM, dtype=float).reshape((6,6,7,5))
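
# Added usage sketch (illustrative, not part of the original snippet): the flat
# list of 1260 coefficients reshapes into a (6, 6, 7, 5) lookup table.
cm = get_CM()
assert cm.shape == (6, 6, 7, 5)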
|
434bab68e2aa434a79a53dd91a4932e631a6367c
| 19,885 |
def remove_sleepEDF(mne_raw, CHANNELS):
"""Extracts CHANNELS channels from MNE_RAW data.
Args:
        mne_raw - mne data structure of n recordings of t seconds each
        CHANNELS - channels to be extracted
Returns:
extracted - mne data structure with only specified channels
"""
extracted = mne_raw.pick_channels(CHANNELS)
return extracted
|
7bb04810676a127742d391c518fc505bf7568aac
| 19,886 |
import datetime
import pytz
def save_email_schedule(request, action, schedule_item, op_payload):
"""
Function to handle the creation and edition of email items
:param request: Http request being processed
:param action: Action item related to the schedule
:param schedule_item: Schedule item or None if it is new
:param op_payload: dictionary to carry over the request to the next step
:return:
"""
# Create the form to ask for the email subject and other information
form = EmailScheduleForm(
data=request.POST or None,
action=action,
instance=schedule_item,
columns=action.workflow.columns.filter(is_key=True),
confirm_items=op_payload.get('confirm_items', False))
# Check if the request is GET, or POST but not valid
if request.method == 'GET' or not form.is_valid():
now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE))
# Render the form
return render(request,
'scheduler/edit.html',
{'action': action,
'form': form,
'now': now})
# Processing a valid POST request
# Save the schedule item object
s_item = form.save(commit=False)
# Assign additional fields and save
s_item.user = request.user
s_item.action = action
s_item.status = ScheduledAction.STATUS_CREATING
s_item.payload = {
'subject': form.cleaned_data['subject'],
'cc_email': [x for x in form.cleaned_data['cc_email'].split(',')
if
x],
'bcc_email': [x
for x in form.cleaned_data['bcc_email'].split(',')
if
x],
'send_confirmation': form.cleaned_data['send_confirmation'],
'track_read': form.cleaned_data['track_read']
}
    # Verify that the action complies with the name uniqueness
    # property (only with respect to other actions)
try:
s_item.save()
except IntegrityError as e:
# There is an action with this name already
form.add_error('name',
_(
'A scheduled execution of this action with this name '
'already exists'))
return render(request,
'scheduler/edit.html',
{'action': action,
'form': form,
'now': datetime.datetime.now(pytz.timezone(
settings.TIME_ZONE))})
# Upload information to the op_payload
op_payload['schedule_id'] = s_item.id
op_payload['confirm_items'] = form.cleaned_data['confirm_items']
if op_payload['confirm_items']:
# Update information to carry to the filtering stage
op_payload['exclude_values'] = s_item.exclude_values
op_payload['item_column'] = s_item.item_column.name
op_payload['button_label'] = ugettext('Schedule')
request.session[action_session_dictionary] = op_payload
return redirect('action:item_filter')
else:
# If there is not item_column, the exclude values should be empty.
s_item.exclude_values = []
s_item.save()
# Go straight to the final step
return finish_scheduling(request, s_item, op_payload)
|
3a14e11a195bcf96ac132d2876e61ce52f0b8ccd
| 19,887 |
def slice(
_data: DataFrame,
*rows: NumericOrIter,
_preserve: bool = False,
base0_: bool = None,
) -> DataFrame:
"""Index rows by their (integer) locations
Original APIs https://dplyr.tidyverse.org/reference/slice.html
Args:
_data: The dataframe
rows: The indexes
Ranges can be specified as `f[1:3]`
Note that the negatives mean differently than in dplyr.
In dplyr, negative numbers meaning exclusive, but here negative
numbers are negative indexes like how they act in python indexing.
For exclusive indexes, you need to use inversion. For example:
`slice(df, ~f[:3])` excludes first 3 rows. You can also do:
`slice(df, ~c(f[:3], 6))` to exclude multiple set of rows.
To exclude a single row, you can't do this directly: `slice(df, ~1)`
since `~1` is directly compiled into a number. You can do this
instead: `slice(df, ~c(1))`
Exclusive and inclusive expressions are allowed to be mixed, unlike
in `dplyr`. They are expanded in the order they are passed in.
_preserve: Relevant when the _data input is grouped.
If _preserve = FALSE (the default), the grouping structure is
recalculated based on the resulting data,
otherwise the grouping is kept as is.
base0_: If rows are selected by indexes, whether they are 0-based.
If not provided, `datar.base.get_option('index.base.0')` is used.
Returns:
The sliced dataframe
"""
if not rows:
return _data
rows = _sanitize_rows(rows, _data.shape[0], base0_)
out = _data.iloc[rows, :]
if isinstance(_data.index, RangeIndex):
out.reset_index(drop=True, inplace=True)
# copy_attrs(out, _data) # attrs carried
return out
|
a58d2ae140d1e441100f7f71587588b93ecaa7b4
| 19,888 |
def get_random_color():
"""
Get random color
:return: np.array([r,g,b])
"""
global _start_color, _color_step
# rgb = np.random.uniform(0, 25, [3])
# rgb = np.asarray(np.floor(rgb) / 24 * 255, np.uint8)
_start_color = (_start_color + _color_step) % np.array([256, 256, 256])
rgb = np.asarray(_start_color, np.uint8).tolist()
return rgb
|
3c9596f264e75c064f76a56d71e06dbe55669936
| 19,889 |
def get_client_ip(request):
"""
Simple function to return IP address of client
:param request:
:return:
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0] # pylint: disable=invalid-name
else:
ip = request.META.get('REMOTE_ADDR') # pylint: disable=invalid-name
return ip
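
# Added usage sketch (illustrative; a minimal stub stands in for a Django request
# object): the first address in X-Forwarded-For wins over REMOTE_ADDR.
class _StubRequest:
    META = {"HTTP_X_FORWARDED_FOR": "203.0.113.7, 10.0.0.1", "REMOTE_ADDR": "10.0.0.1"}
assert get_client_ip(_StubRequest()) == "203.0.113.7"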
|
976755d296127a42de5b6d7c39bfc9a607b273ee
| 19,890 |
def entropy(wair,temp,pres,airf=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,dhum0=None,dliq0=None,chkbnd=False,
mathargs=None):
"""Calculate wet air entropy.
Calculate the specific entropy of wet air.
:arg float wair: Total dry air fraction in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg airf: Dry air fraction in humid air in kg/kg.
:type airf: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the dry fraction in kg/kg. If None
(default) then `iceair4a._approx_tp` is used.
:type airf0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then `liqair4a._approx_tp` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `liqair4a._approx_tp` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Entropy in J/kg/K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If air with the given parameters would be
unsaturated.
:Examples:
>>> entropy(0.5,300.,1e5)
343.783393872
"""
g_t = liqair_g(0,1,0,wair,temp,pres,airf=airf,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,dliq0=dliq0,
chkbnd=chkbnd,mathargs=mathargs)
s = -g_t
return s
|
4cd0b53bdf549a0f53d2543e693f6b179a0f0915
| 19,891 |
def get_list_item(view, index):
"""
get item from listView by index
version 1
:param view:
:param index:
:return:
"""
return var_cache['proxy'].get_list_item(view, index)
|
bcb1db741a87bc2c12686ade8e692449030eb9cf
| 19,892 |
def interquartile_range_checker(train_user: list) -> float:
"""
    Optional method: interquartile range
    input : list of total user in float
    output : low limit of input in float
    this method can be used to check whether some data is an outlier or not
>>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
2.8
"""
train_user.sort()
q1 = np.percentile(train_user, 25)
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return low_lim
|
29eba79cef5c91e491250780ed3fb7ca74d7cace
| 19,893 |
def make_linear_colorscale(colors):
"""
Makes a list of colors into a colorscale-acceptable form
For documentation regarding to the form of the output, see
https://plot.ly/python/reference/#mesh3d-colorscale
"""
scale = 1.0 / (len(colors) - 1)
return [[i * scale, color] for i, color in enumerate(colors)]
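
# Added usage sketch (illustrative, not part of the original snippet): three colors
# spread evenly over [0, 1].
scale = make_linear_colorscale(["rgb(255,0,0)", "rgb(0,255,0)", "rgb(0,0,255)"])
# -> [[0.0, 'rgb(255,0,0)'], [0.5, 'rgb(0,255,0)'], [1.0, 'rgb(0,0,255)']]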
|
dabd2a2a9d6bbf3acfcabcac52246048332fae73
| 19,894 |
def background(image_size: int, level: float=0, grad_i: float=0, grad_d: float=0) -> np.array:
"""
Return array representing image background of size `image_size`.
    The image may have an illumination gradient of intensity `grad_i` and direction `grad_d`.
The `image_size` is in pixels. `grad_i` expected to be between 0 and 1.
`grad_d` is gradient direction in radians.
"""
h = image_size // 2
background = np.ones((image_size,image_size)) * level
ix,iy = np.meshgrid(np.arange(-h, h + 1), np.arange(-h, h + 1))
illumination_gradient = grad_i * ((ix * np.sin(grad_d)) + (iy * np.cos(grad_d))) / (np.sqrt(2) * image_size)
return background + illumination_gradient
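
# Added usage sketch (illustrative; assumes numpy is imported as np): the meshgrid
# spans image_size//2 pixels either side of the centre, so an odd image_size keeps
# the background and gradient arrays the same shape.
bg = background(image_size=65, level=0.5, grad_i=0.1, grad_d=np.pi / 2)
# bg.shape == (65, 65), ramping gently from west to east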
|
7fc6d34ec02752604024746e707f70a82ad61450
| 19,895 |
def mapTypeCategoriesToSubnetName(nodetypecategory, acceptedtypecategory):
"""This function returns a name of the subnet that accepts nodetypecategory
as child type and can be created in a container whose child type is
acceptedtypecategory.
Returns None if these two categories are the same (ie, no need for
a subnet to accommodate nodetypecategory). Also returns None if
the mapping has not been defined yet.
"""
return ''
|
c9a31c571807cd2592340ce685b1f130f99da156
| 19,896 |
def sorted_unique(series):
"""Return the unique values of *series*, correctly sorted."""
# This handles Categorical data types, which sorted(series.unique()) fails
# on. series.drop_duplicates() is slower than Series(series.unique()).
return list(pd.Series(series.unique()).sort_values())
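
# Added usage sketch (illustrative; assumes pandas is imported as pd): unique values
# come back correctly sorted, also for dtypes where sorted(series.unique()) can fail.
s = pd.Series([3, 1, 2, 1, 3])
assert sorted_unique(s) == [1, 2, 3]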
|
3da88962171acd15f3af020ff056afb66d284425
| 19,897 |
import json
def create_query_from_request(p, request):
"""
Create JSON object representing the query from request received from Dashboard.
:param request:
:return:
"""
query_json = {'process_type': DVAPQL.QUERY}
count = request.POST.get('count')
generate_tags = request.POST.get('generate_tags')
selected_indexers = json.loads(request.POST.get('selected_indexers',"[]"))
selected_detectors = json.loads(request.POST.get('selected_detectors',"[]"))
query_json['image_data_b64'] = request.POST.get('image_url')[22:]
query_json['tasks'] = []
indexer_tasks = defaultdict(list)
if generate_tags and generate_tags != 'false':
query_json['tasks'].append({'operation': 'perform_analysis',
'arguments': {'analyzer': 'tagger','target': 'query',}
})
if selected_indexers:
for k in selected_indexers:
indexer_pk, retriever_pk = k.split('_')
indexer_tasks[int(indexer_pk)].append(int(retriever_pk))
for i in indexer_tasks:
di = TrainedModel.objects.get(pk=i,model_type=TrainedModel.INDEXER)
rtasks = []
for r in indexer_tasks[i]:
rtasks.append({'operation': 'perform_retrieval', 'arguments': {'count': int(count), 'retriever_pk': r}})
query_json['tasks'].append(
{
'operation': 'perform_indexing',
'arguments': {
'index': di.name,
'target': 'query',
'map': rtasks
}
}
)
if selected_detectors:
for d in selected_detectors:
dd = TrainedModel.objects.get(pk=int(d),model_type=TrainedModel.DETECTOR)
if dd.name == 'textbox':
query_json['tasks'].append({'operation': 'perform_detection',
'arguments': {'detector_pk': int(d),
'target': 'query',
'map': [{
'operation': 'perform_analysis',
'arguments': {'target': 'query_regions',
'analyzer': 'crnn',
'filters': {'event_id': '__parent_event__'}
}
}]
}
})
elif dd.name == 'face':
dr = Retriever.objects.get(name='facenet',algorithm=Retriever.EXACT)
query_json['tasks'].append({'operation': 'perform_detection',
'arguments': {'detector_pk': int(d),
'target': 'query',
'map': [{
'operation': 'perform_indexing',
'arguments': {'target': 'query_regions',
'index': 'facenet',
'filters': {'event_id': '__parent_event__'},
'map':[{
'operation':'perform_retrieval',
'arguments':{'retriever_pk':dr.pk,
'filters':{'event_id': '__parent_event__'},
'target':'query_region_index_vectors',
'count':10}
}]}
}]
}
})
else:
query_json['tasks'].append({'operation': 'perform_detection',
'arguments': {'detector_pk': int(d), 'target': 'query', }})
user = request.user if request.user.is_authenticated else None
p.create_from_json(query_json, user)
return p.process
|
4936376d6d900ca20d2ea9339634d0a7d90ebc2e
| 19,898 |
from typing import List
from typing import Any
def create_cxr_transforms_from_config(config: CfgNode,
apply_augmentations: bool) -> ImageTransformationPipeline:
"""
    Defines the image transformation pipeline used in Chest-Xray datasets. It can be used for other types of
    image data; the type and strength of the augmentations to apply are expected to be defined in the config.
:param config: config yaml file fixing strength and type of augmentation to apply
:param apply_augmentations: if True return transformation pipeline with augmentations. Else,
disable augmentations i.e. only resize and center crop the image.
"""
transforms: List[Any] = [ExpandChannels()]
if apply_augmentations:
if config.augmentation.use_random_affine:
transforms.append(RandomAffine(
degrees=config.augmentation.random_affine.max_angle,
translate=(config.augmentation.random_affine.max_horizontal_shift,
config.augmentation.random_affine.max_vertical_shift),
shear=config.augmentation.random_affine.max_shear
))
if config.augmentation.use_random_crop:
transforms.append(RandomResizedCrop(
scale=config.augmentation.random_crop.scale,
size=config.preprocess.resize
))
else:
transforms.append(Resize(size=config.preprocess.resize))
if config.augmentation.use_random_horizontal_flip:
transforms.append(RandomHorizontalFlip(p=config.augmentation.random_horizontal_flip.prob))
if config.augmentation.use_gamma_transform:
transforms.append(RandomGamma(scale=config.augmentation.gamma.scale))
if config.augmentation.use_random_color:
transforms.append(ColorJitter(
brightness=config.augmentation.random_color.brightness,
contrast=config.augmentation.random_color.contrast,
saturation=config.augmentation.random_color.saturation
))
if config.augmentation.use_elastic_transform:
transforms.append(ElasticTransform(
alpha=config.augmentation.elastic_transform.alpha,
sigma=config.augmentation.elastic_transform.sigma,
p_apply=config.augmentation.elastic_transform.p_apply
))
transforms.append(CenterCrop(config.preprocess.center_crop_size))
if config.augmentation.use_random_erasing:
transforms.append(RandomErasing(
scale=config.augmentation.random_erasing.scale,
ratio=config.augmentation.random_erasing.ratio
))
if config.augmentation.add_gaussian_noise:
transforms.append(AddGaussianNoise(
p_apply=config.augmentation.gaussian_noise.p_apply,
std=config.augmentation.gaussian_noise.std
))
else:
transforms += [Resize(size=config.preprocess.resize),
CenterCrop(config.preprocess.center_crop_size)]
pipeline = ImageTransformationPipeline(transforms)
return pipeline
|
9d16e844291a69d4b82681c4cdcc48f5a1b3d67f
| 19,899 |