content | sha1 | id |
---|---|---|
stringlengths 35..762k | stringlengths 40..40 | int64 0..3.66M |
import networkx as nx
def combine(connected_events):
"""
Combine connected events into a graph.
:param connected_events: see polychronous.filter
:return: graph_of_connected_events
"""
graph_of_connected_events = nx.Graph()
graph_of_connected_events.add_edges_from(connected_events)
return (graph_of_connected_events) | 99471930f70bea0583d36d3c0c13fc62b23d6fe8 | 3,659,000 |
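A minimal usage sketch for combine; the event pairs below are made up for illustration:
connected_events = [(1, 2), (2, 3), (5, 6)]
g = combine(connected_events)
print(g.number_of_nodes(), g.number_of_edges())  # 5 3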
import hashlib
def calculate_hash(filepath, hash_name):
"""Calculate the hash of a file. The available hashes are given by the hashlib module. The available hashes can be listed with hashlib.algorithms_available."""
hash_name = hash_name.lower()
if not hasattr(hashlib, hash_name):
raise Exception('Hash algorithm not available : {}'\
.format(hash_name))
with open(filepath, 'rb') as f:
checksum = getattr(hashlib, hash_name)()
for chunk in iter(lambda: f.read(4096), b''):
checksum.update(chunk)
return checksum.hexdigest() | 975fe0a2a4443ca3abc67ed950fb7200409f2497 | 3,659,001 |
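For illustration (the file path is a placeholder), any algorithm name exposed by hashlib works regardless of case:
digest = calculate_hash('example.bin', 'SHA256')
print(digest)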
from pathlib import Path
from typing import Type
from typing import Tuple
from typing import List
from typing import Dict
import logging
import yaml
import sys
def specify_run_step(
args: RunConfig,
aml_resources: AMLResources,
run_script_path: Path,
loop_config_class: Type[OptimizerConfig],
check_consistency: bool = True,
) -> Tuple[List[PythonScriptStep], List[PipelineData], Dict[str, List[str]], List[str]]:
"""
Create the pipeline step(s) to run the simulation.
Args:
aml_resources: an instance of AMLResources which contains the necessary information on
AML resources to instantiate pipeline steps
run_script_path: script that the run step should invoke
loop_config_class: (subclass of) OptimizerConfig that should be instantiated
check_consistency: whether to run data_and_simulation_are_consistent; normally we do, but
this may be set to False for tests that check other parts of this functionality.
Returns: A list of PythonScriptSteps, with one for each expansion, a list of output data locations in AML,
a dictionary of styled subsets for plotting, and a list of the temporary spec files that have been created
"""
# Expand config
selections_and_configs = list(load_resolutions_from_command(args, loop_config_class))
temp_spec_files = []
parallel_steps = []
all_run_outputs = []
styled_subsets: Dict[str, List[str]] = {}
# For each expansion, create a PythonScriptStep to run the simulator script.
num_selections = len(selections_and_configs)
for index, pair_list in enumerate(selections_and_configs, 1):
config0 = pair_list[0][1]
if (not check_consistency) or data_and_simulation_are_consistent(config0):
logging.info(
f"Config resolution {index} of {num_selections} will have {len(pair_list)} runs included in pipeline"
)
else: # pragma: no cover
logging.error(f"Dropping config resolution {index} of {num_selections} from pipeline")
continue
for config_dct, config in pair_list:
batch_strategy = config_dct["bayesopt"]["batch_strategy"]
acquisition = config_dct["bayesopt"]["acquisition"]
experiment_label = f"{batch_strategy} - {acquisition}"
# TODO: what about acquisition, optimization_strategy?
if batch_strategy not in styled_subsets:
styled_subsets[batch_strategy] = [experiment_label]
else:
styled_subsets[batch_strategy].append(experiment_label) # pragma: no cover
# set up the run configuration
aml_run_config = RunConfiguration(_name=f"Parallel run combination {config.resolution_spec}.{config.seed}")
aml_run_config.target = aml_resources.compute_target
aml_run_config.environment = aml_resources.env # type: ignore # auto
# create different versions of args for each combination
temp_config_path = spec_file_basename(config.resolution_spec, config.seed or 0, suffix="yml")
temp_spec_files.append(temp_config_path)
with Path(temp_config_path).open("w") as fp:
yaml.dump(config_dct, fp, Dumper=CustomDumper)
args.spec_file = temp_config_path
original_arg_list = sys.argv[1:]
simulator_args = original_arg_list
spec_file_index = simulator_args.index("--spec_file")
simulator_args[spec_file_index + 1] = temp_config_path
num_runs_index = simulator_args.index("--num_runs")
if isinstance(num_runs_index, int) and num_runs_index >= 0:
simulator_args[num_runs_index + 1] = "1" # pragma: no cover
else:
simulator_args += ["--num_runs", "1"]
# create PipelineData to consume the output of this step in the next (plotting) step
step_output = PipelineData(
name=f"outputs_batch_{config.resolution_spec}_{config.seed}",
output_name=f"outputs_batch_{config.resolution_spec}_{config.seed}",
datastore=aml_resources.datastore,
is_directory=True,
)
all_run_outputs += [step_output]
simulator_args += ["--output_dir", step_output]
step = PythonScriptStep(
script_name=str(run_script_path.absolute().relative_to(ROOT_DIR)),
source_directory=ROOT_DIR,
arguments=simulator_args,
outputs=[step_output],
compute_target=aml_resources.compute_target,
runconfig=aml_run_config,
)
parallel_steps.append(step)
return parallel_steps, all_run_outputs, styled_subsets, temp_spec_files | 30aa58f599559d9be2c29524befe710cc26fac1d | 3,659,002 |
def default_mp_value_parameters():
"""Set the different default parameters used for mp-values.
Returns
-------
dict
A default parameter set with keys: rescale_pca (whether the PCA should be
scaled by variance explained) and nb_permutations (how many permutations to
calculate empirical p-value). Defaults to True and 100, respectively.
"""
params = {"rescale_pca": True, "nb_permutations": 100}
return params | 0dcac3981154fbf0cc1fa0eeed6e83a1e1b63294 | 3,659,003 |
import os
def change_auth_keys(server, user, auth_keys):
"""
Update authorized keys. auth_keys is a list of keys.
Gets the current auth keys, removes keys tagged with auth_tag, and adds the new
auth_keys with auth_tag appended.
return: on success, (0, None, None); on failure, a dict: { stdout: xxx, stderr: yyy }
"""
auth_tag = os.getenv('AUTH_KEY_TAG')
retcode, out, err = get_auth_keys(server, user)
if retcode != 0:
return {'stdout': out, 'stderr': err}
current_keys = [x for x in out.strip().split('\n') if auth_tag not in x]
for key in auth_keys:
current_keys.append(f'{key} {auth_tag}')
retcode, out, err = set_auth_keys(server, user, ':'.join(current_keys))
if retcode != 0:
return {'stdout': out, 'stderr': err}
return 0, None, None | b1b67d168ca5b863ce6ddc62b9ecc988d70aace8 | 3,659,004 |
def svn_wc_diff(*args):
"""
svn_wc_diff(svn_wc_adm_access_t anchor, char target, svn_wc_diff_callbacks_t callbacks,
void callback_baton,
svn_boolean_t recurse, apr_pool_t pool) -> svn_error_t
"""
return _wc.svn_wc_diff(*args) | c4fbc11d26b6da2d595cb79314b0d901b084eb52 | 3,659,005 |
import re
def _FindResourceIds(header, resource_names):
"""Returns the numerical resource IDs that correspond to the given resource
names, as #defined in the given header file.
"""
pattern = re.compile(
r'^#define (%s) _Pragma\S+ (\d+)$' % '|'.join(resource_names))
with open(header, 'r') as f:
res_ids = [ int(pattern.match(line).group(2))
for line in f if pattern.match(line) ]
if len(res_ids) != len(resource_names):
raise Exception('Find resource id failed: the result is ' +
', '.join(str(i) for i in res_ids))
return set(res_ids) | 24847b1d4374a2022ae12f5161bd9df4becd110d | 3,659,006 |
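For reference, a hypothetical header line that the regex above would match; group(1) captures the resource name and group(2) the numeric ID:
#define IDR_SOME_RESOURCE _Pragma("whitelisted") 12345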
import re
def resolve_request_path(requested_uri):
"""
Check for any aliases and alter the path accordingly.
Returns resolved_uri
"""
for key, val in PATH_ALIASES.items():
if re.match(key, requested_uri):
return re.sub(key, val, requested_uri)
return requested_uri | 5405a795a95279a354d455f3702dbf2c3dc6f1e0 | 3,659,007 |
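A small usage sketch with a hypothetical alias table; keys are regex patterns, values are re.sub replacement strings:
PATH_ALIASES = {r'^/old/(.*)$': r'/new/\1'}
print(resolve_request_path('/old/index.html'))  # -> /new/index.html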
def apim_api_delete(
client, resource_group_name, service_name, api_id, delete_revisions=None, if_match=None, no_wait=False):
"""Deletes an existing API. """
cms = client.api
return sdk_no_wait(
no_wait,
cms.delete,
resource_group_name=resource_group_name,
service_name=service_name,
api_id=api_id,
if_match="*" if if_match is None else if_match,
delete_revisions=delete_revisions if delete_revisions is not None else False) | 4be4f895ae576ee1ffd08af31abcdad193b84b2c | 3,659,008 |
def deep_copy(obj):
"""Make deep copy of VTK object."""
copy = obj.NewInstance()
copy.DeepCopy(obj)
return copy | c00c4ff44dad5c0c018152f489955f08e633f5ed | 3,659,009 |
import numpy
def get_dunn_index(fdist, *clusters):
"""
Returns the Dunn index for the given selection of nodes.
J.C. Dunn. Well separated clusters and optimal fuzzy
partitions. 1974. J.Cybern. 4. 95-104.
"""
if len(clusters)<2:
raise ValueError("At least 2 clusters are required")
intra_dist = []
for c in clusters:
for i in c.get_leaves():
if i is not None:
# item intracluster dist -> Centroid Diameter
a = fdist(i.profile, c.profile)*2
intra_dist.append(a)
max_a = numpy.max(intra_dist)
inter_dist = []
for i, ci in enumerate(clusters):
for cj in clusters[i+1:]:
# intracluster dist -> Centroid Linkage
b = fdist(ci.profile, cj.profile)
inter_dist.append(b)
min_b = numpy.min(inter_dist)
if max_a == 0.0:
D = 0.0
else:
D = min_b / max_a
return D | c78c5302d78b5d5969a5edf9e19b81ee6f68bfbf | 3,659,010 |
import random
def sample(words, n=10) -> list:
"""Sample n random words from a list of words."""
return [random.choice(words) for _ in range(n)] | cad435238c776b5fcda84d50295ac50298bf3ab2 | 3,659,011 |
def extract_peaks(
imzml_path,
db,
tol_ppm=DEFAULT_TOL_PPM,
tol_mode=DEFAULT_TOL_MODE,
base_mz=DEFAULT_BASE_MZ,
):
"""
Extract all peaks from the given imzML file for the supplied database of molecules.
:param imzml_path:
:param db: A pandas DataFrame containing an 'mz' column. Additional metadata columns are also allowed.
:param tol_ppm:
The maximum distance from a theoretical m/z to search for peaks. e.g. 3 means +/- 3ppm
:param tol_mode:
The model for adjusting tol_ppm based on the area of the mass range.
To match METASPACE, specify 'tof', which means 1ppm is always mz * 1e-6 (i.e. 1ppm at every mass)
See the `ppm_to_daltons` function for more examples.
:param base_mz:
The base m/z for tolerance calculations. Doesn't matter with 'tof'.
See the `ppm_to_daltons` function for more details.
:return:
coords_df - a DataFrame mapping spectrum idx to x,y values.
Needed for converting 'peaks_df' values to images
peaks - A list of dicts. Each dict contains:
'mol': A NamedTuple of the DB peak row. Access fields with e.g. peak['mol'].formula
'peaks_df': a DataFrame with one row per found peak. Columns:
'sp': Spectrum idx
'mz': m/z
'ints': Intensity value
"""
assert 'mz' in db.columns, 'db must have an "mz" column'
assert tol_mode in TOL_MODES, f'invalid tol_mode: {tol_mode}'
p = ImzMLParser(str(imzml_path))
coords_df = pd.DataFrame(p.coordinates, columns=['x', 'y', 'z'][:len(p.coordinates[0])], dtype='i')
coords_df['x'] -= np.min(coords_df.x)
coords_df['y'] -= np.min(coords_df.y)
mz_tol_lo, mz_tol_hi = tol_edges(db.mz, tol_ppm, tol_mode, base_mz)
# Uncomment this to add the tolerance boundaries to db for debugging:
# db['mz_tol_lo'], db['mz_tol_hi'] = mz_tol_lo, mz_tol_hi
mol_peaks = [[] for _ in range(len(db))]  # one list of matched peaks per DB molecule
for sp, x, y in coords_df[['x', 'y']].itertuples(True, None):
mzs, ints = p.getspectrum(sp)
mz_range_lo = np.searchsorted(mzs, mz_tol_lo, 'left')
mz_range_hi = np.searchsorted(mzs, mz_tol_hi, 'right')
mask = mz_range_lo != mz_range_hi
for peak, idx_lo, idx_hi in zip(np.flatnonzero(mask), mz_range_lo[mask], mz_range_hi[mask]):
for i in range(idx_lo, idx_hi):
mol_peaks[peak].append((sp, mzs[i], ints[i]))
empty_peaks_df = pd.DataFrame({
'sp': pd.Series(dtype='i'),
'mz': pd.Series(dtype='f'),
'ints': pd.Series(dtype='f'),
})
result = [{
'mol': db_row,
'peaks_df': pd.DataFrame(peaks, columns=['sp', 'mz', 'ints']) if peaks else empty_peaks_df
} for db_row, peaks in zip(db.itertuples(), mol_peaks)]
return coords_df, result | b388fc8cd4e7e1be7a2179caddf8f07cf044173e | 3,659,012 |
def cov_dense(n_features=100, scale=0.5,
edges='ones', pos=True, force_psd=True, random_state=None):
"""
Returns a covariance matrix with a constant diagonal whose off-diagonal elements are obtained from adj_mats.complete_graph()
Parameters
----------
n_features: int
scale: float
Scale of the off diagonal entries.
edges: str
How the edges should be sampled. See adj_mats.complete_graph()
pos: bool
Should the off-diagonal entries be all positive.
force_psd: bool
Make sure the covariance matrix is positive semi-definite by zeroing out all negative eigenvalues.
random_state: None, int
Random seed for sampling.
Output
------
cov: array-like, (n_features, n_features)
The sampled covariance matrix.
"""
cov = complete_graph(n_nodes=n_features, edges=edges,
pos=pos, random_state=random_state)
cov = cov * scale
np.fill_diagonal(cov, 1.0)
if force_psd:
cov = project_psd(cov)
return cov | 48b8f5fec91ea11acaf9ce026d8b1742b5185604 | 3,659,013 |
def measure_fwhm(array):
"""Fit a Gaussian2D model to a PSF and return the FWHM
Parameters
----------
array : numpy.ndarray
Array containing PSF
Returns
-------
x_fwhm : float
FWHM in x direction in units of pixels
y_fwhm : float
FWHM in y direction in units of pixels
"""
yp, xp = array.shape
y, x = np.mgrid[:yp, :xp]
p_init = models.Gaussian2D()
fit_p = fitting.LevMarLSQFitter()
fitted_psf = fit_p(p_init, x, y, array)
return fitted_psf.x_fwhm, fitted_psf.y_fwhm | e3ee047b453b979387505a19bdfebb75950a3916 | 3,659,014 |
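A quick self-check sketch, assuming astropy and numpy are available: build a noiseless Gaussian2D PSF with known widths and recover its FWHM (FWHM = 2*sqrt(2*ln 2)*stddev ≈ 2.355*stddev). The expected output assumes the fit converges from astropy's default initial guess.
import numpy as np
from astropy.modeling import models, fitting
yy, xx = np.mgrid[:51, :51]
psf = models.Gaussian2D(amplitude=1.0, x_mean=25, y_mean=25, x_stddev=3.0, y_stddev=5.0)(xx, yy)
print(measure_fwhm(psf))  # roughly (7.06, 11.77)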
def exists(profile, bucket, name):
"""Check if a file exists in an S3 bucket.
Args:
profile
A profile to connect to AWS with.
bucket
The name of the bucket you want to find the file in.
name
The name of a file.
Returns:
True if it exists, False if it doesn't.
"""
result = fetch_by_name(profile, bucket, name)
return len(result) > 0 | 5269cca9198a1d100b76b13f6e2fbf7314d948fd | 3,659,015 |
def fetchquota(adr):
"""Retrieves the account quota information and passes the interesting
part of the json object along to the request source.
Arguments:
adr (str): The email account address of interest.
Returns:
The quota part of the json object for the response.
"""
debuginfo("Fetching quota info for account.")
return apirequest("quota", {'emailaccount': adr})["response"]["quota"] | 56b5c283ae38bc56c315cd6140d2fd910baeaf98 | 3,659,016 |
def project_login(driver):
"""
Sets a different sample number for each worker thread; if the questionnaire has been modified, supply the first-priority sample number for that questionnaire.
"""
SAMPLE_NUMBER = 20200101+sample_add
try:
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.XPATH, '//*[@name="{}"][1]'
.format(str(SAMPLE_NUMBER))))).click() # select the sample number to answer
sleep(1)
driver.find_element_by_class_name('btn.btn-blue').click() # click to start the interview
print("STEP 3[project_login]: Project Login Successfully !")
form_basic_info(driver)
except NoSuchElementException:
driver.find_element_by_xpath('//*[@id="case_in_prj_next"]/a').click() # if the sample number is not found, go to the next page
return project_login(driver)
else:
return "STEP 3[project_login]: Loading took too much time !" | db3ef26e1769cb991c887509427f9d809047398d | 3,659,017 |
def convert_convolutionfunction_to_image(cf):
""" Convert ConvolutionFunction to an image
:param cf:
:return:
"""
return create_image_from_array(cf.data, cf.grid_wcs, cf.polarisation_frame) | 6f5819abce6a987665ff49af9e5fca70f586a478 | 3,659,018 |
def macro(libname):
"""Decorator for macros (Moya callables)."""
def deco(f):
exposed_elements[libname] = f
return f
return deco | c4d06d2b9e3fa7913445554794027e68328ab918 | 3,659,019 |
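A usage sketch; exposed_elements is a module-level registry in the original library, re-declared here only for illustration:
exposed_elements = {}

@macro("my_lib.greet")
def greet(name):
    return "hello " + name

print(exposed_elements["my_lib.greet"] is greet)  # True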
import logging
import torch
def get_dataloaders(dataset, mode='train', root=None, shuffle=True, pin_memory=True,
batch_size=8, logger=logging.getLogger(__name__), normalize=False, **kwargs):
"""A generic data loader
Parameters
----------
dataset : {"openimages", "jetimages", "evaluation"}
Name of the dataset to load
root : str
Path to the dataset root. If `None` uses the default one.
kwargs :
Additional arguments to `DataLoader`. Default values are modified.
"""
pin_memory = pin_memory and torch.cuda.is_available()  # only pin if a GPU is available
Dataset = get_dataset(dataset)
if root is None:
dataset = Dataset(logger=logger, mode=mode, normalize=normalize, **kwargs)
else:
dataset = Dataset(root=root, logger=logger, mode=mode, normalize=normalize, **kwargs)
return DataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=NUM_DATASET_WORKERS,
collate_fn=exception_collate_fn,
pin_memory=pin_memory) | 6bb40b3eb1bc004418dd8910dab1432cd3984ca5 | 3,659,020 |
def stats_file(filename, shape, dtype=None, file_format='raw',
out_of_core=True, buffer_size=None, max_memory=None,
progress_frequency=None):
"""stats_file(filename, shape, dtype=None, file_format='raw',
out_of_core=True, buffer_size=None, max_memory=None,
progress_frequency=None) -> StatsInfo object
returns a StatsInfo about the content of 'filename', which is a cube with 'shape'.
If 'out_of_core' (out-of-core) is True, process 'buffer_size' elements at a time.
"""
shape = Shape(shape)
filename = interpolate_filename(filename, shape=shape, file_format=file_format, dtype=dtype)
if out_of_core and file_format == 'raw':
stats_info = stats_info_out_of_core(filename, shape=shape, dtype=dtype,
buffer_size=buffer_size, max_memory=max_memory,
progress_frequency=progress_frequency)
else:
cube = read_cube(file=filename, shape=shape, dtype=dtype, file_format=file_format)
stats_info = StatsInfo.stats_info(cube)
return stats_info | 750b4d334aa25a2423e5278eab7cd5ee43385303 | 3,659,021 |
def get_info(args):
"""
Loads todo.txt, sets up file paths, loads in any available star information, saves the
relevant parameters for each of the two main routines and sets the plotting parameters.
Parameters
----------
args : argparse.Namespace
command-line arguments
parallel : bool
if pysyd will be running in parallel mode
CLI : bool, optional
if CLI is not being used (i.e. `False`), the modules draw default values from a different location
Returns
-------
args : argparse.Namespace
the updated command-line arguments
"""
# Get parameters for all modules
args = get_parameters(args)
# Get invidual/specific star info from csv file (if it exists)
args = get_csv_info(args)
if args.cli:
# Check the input variables
check_input_args(args)
args = get_command_line(args)
set_plot_params()
return args | 919ae745424c16c6307db460ec2054b250ba72a6 | 3,659,022 |
def _weight_func(dist):
"""Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid."""
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide="ignore"):
retval = 1.0 / dist
return retval**2 | 9052b68592f2f6cf4c59c623a3561f77d3d2b933 | 3,659,023 |
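scikit-learn's nearest-neighbour estimators accept a callable for weights that maps a distance array to an equally shaped weight array, so _weight_func can be plugged in directly; a small sketch (assumes scikit-learn is installed):
import numpy as np
from sklearn.neighbors import KNeighborsRegressor

X = np.arange(20, dtype=float).reshape(-1, 1)
y = X.ravel() ** 2
model = KNeighborsRegressor(n_neighbors=2, weights=_weight_func).fit(X, y)
print(model.predict([[4.5]]))  # [20.5], the inverse-square-distance weighted mean of 16 and 25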
def two_poles(time_limit=_DEFAULT_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns the Cartpole Balance task with two poles."""
physics = Physics.from_xml_string(*get_model_and_assets(num_poles=2))
task = Balance(swing_up=True, sparse=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, **environment_kwargs) | b5236731d61464067073c3275cdd03d493f17821 | 3,659,024 |
def process_topic_entity(entity: dict, language: str) -> bool:
"""
Given a topic entity, gather its metadata
:param entity
:param language:
:type entity dict
:type language str
:returns bool
"""
try:
# Get ID
remote_id = entity["title"]
print("%s\t%s" % ("ID".ljust(16), remote_id))
# Get name from label
name = entity["labels"][language]["value"].lower()
print("%s\t%s" % ("name".ljust(16), name))
# Get brief
brief = entity["descriptions"][language]["value"].lower()
print("%s\t%s" % ("description".ljust(16), brief))
print_end()
except Exception as err:
print_err("%s error: %s" % (remote_id, err))
return False
return True | 2f03c0d24f35e49cd05ac11389b91345cc43de6e | 3,659,025 |
import math
import torch
from torch import Tensor
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor:
"""Cut & paste from PyTorch official master until it's in a few official
releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
Args:
tensor (Tensor):
An n-dimensional `Tensor`.
mean (float):
Mean of the normal distribution.
std (float):
Standard deviation of the normal distribution.
a (float):
Minimum cutoff value.
b (float):
Maximum cutoff value.
"""
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
error_console.log(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"Fdistribution of values may be incorrect.", stacklevel=2
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill image with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor | 064fce46591c490999c6495999554700f478878b | 3,659,026 |
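A minimal usage sketch: fill a freshly allocated weight tensor with truncated-normal values clamped to [-2, 2]:
w = torch.empty(3, 5)
_no_grad_trunc_normal_(w, mean=0.0, std=1.0, a=-2.0, b=2.0)
print(w.min().item() >= -2.0 and w.max().item() <= 2.0)  # True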
def inverse_update(C, m, return_drop=False):
"""
Compute the inverse of a matrix with the m-th row and column dropped given knowledge of the inverse of the original
matrix.
C = inv(A)
B = drop_col(drop_row(A, m),m)
computes inv(B) given only C
Args:
C: inverse of the full matrix
m: row and col to drop
return_drop: whether to also return the array used to drop the m-th row/col.
Returns:
B
if return_drop:
the array to drop row/col using jnp.take(v, drop_array)
"""
drop = drop_array(C.shape[0], m)
_a = jnp.take(C, drop, axis=0) # drop m row
a = jnp.take(_a, drop, axis=1)
c = jnp.take(C, drop, axis=1)[None, m, :] # drop m col
b = _a[:, m, None]
d = C[m, m]
res = a - (b @ c) / d
if return_drop:
return res, drop
return res | 0f368d30d0459fe3d07d6fc1fa19dedc449e23e9 | 3,659,027 |
def loss_calc(settings, all_batch, market_batch):
""" Calculates nn's NEGATIVE loss.
Args:
settings: contains the neural net
all_batch: the inputs to neural net
market_batch: [open close high low] used to calculate loss
Returns:
cost: loss - l1 penalty
"""
loss = settings['nn'].loss_np(all_batch, market_batch)
return -loss | fdca1bb0fa86d1972c2a0f8b1fab10183e98fb4e | 3,659,028 |
def fits_downloaded_correctly(fits_loc):
"""
Is there a readable fits image at fits_loc?
Does NOT check for bad pixels
Args:
fits_loc (str): location of fits file to open
Returns:
(bool) True if file at fits_loc is readable, else False
"""
try:
img, _ = fits.getdata(fits_loc, 0, header=True)
return True
except Exception: # image fails to open
return False | 8df470b4b2895fb7d77cbccefbd2eae7f22c649b | 3,659,029 |
def union_of_rects(rects):
"""
Calculates union of two rectangular boxes
Assumes both rects of form N x [xmin, ymin, xmax, ymax]
"""
xA = np.min(rects[:, 0])
yA = np.min(rects[:, 1])
xB = np.max(rects[:, 2])
yB = np.max(rects[:, 3])
return np.array([xA, yA, xB, yB], dtype=np.int32) | 904cb58f593bedfbf0e28136a446b4f877955e49 | 3,659,030 |
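A small sketch of the bounding union of two boxes (numpy imported here since the snippet assumes it is already in scope):
import numpy as np
rects = np.array([[10, 10, 50, 40], [20, 5, 60, 45]])
print(union_of_rects(rects))  # [10  5 60 45]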
from typing import List
from typing import Dict
def configure_services(config: List[Dict]) -> Dict[str, GcpServiceQuery]:
"""
Generate GcpServiceQuery list from config
:param config: list with GcpServiceQuery's configuration
:return: mapping of service name to GcpServiceQuery objects
"""
if not isinstance(config, list):
raise GcpServiceQueryConfigError(f"Invalid GcpServiceQuery config {config}")
result = {}
for entry in config:
if not isinstance(entry, dict):
raise GcpServiceQueryConfigError(f"Invalid GcpServiceQuery entry type: '{entry}'. "
f"Should be dict, is {type(entry)}")
serviceName = entry.get(SERVICE_NAME, None)
version = entry.get(VERSION, None)
queries = entry.get(QUERIES, None)
if not serviceName or not version or not queries:
raise GcpServiceQueryConfigError(f"Missing required key for entry {entry}")
gcp_service_query = GcpServiceQuery(serviceName, version)
# Check multiple entries with same name
if serviceName in result:
raise GcpServiceQueryConfigError(f"Multiple GCP service with same name: {serviceName}")
result[serviceName] = gcp_service_query
return result | 3c9b9472de4d319446ec4da1d990ecc1750bd248 | 3,659,031 |
def tags_get():
"""
Get endpoint /api/tag
args:
optional company_filter(int) - id of a company, will only return tag relation to said company
optional crowd(int) - 0 - 2 specifying the crowd-sourcing option. Key:
0 - all tags
1 - Only crowd sourced tags
2 - Only non crowd sourced tags
optional only_ids - if set only returns ids of tags
return:
List Tags - A json list of all tags that match the optional args.
"""
request_data = request.get_json()
company_filter = get_if_exist(request_data, "company_filter")
only_ids = get_if_exist(request_data,"only_ids")
crowd = get_if_exist(request_data, "crowd")
if crowd:
if crowd > 2:
return status.HTTP_400_BAD_REQUEST
crowd = 0
if company_filter:
t = db.session.query(
Tag_company.tag,
).filter(Tag_company.company == int(company_filter)).group_by(Tag_company.tag).subquery('t')
Tag_query = Tag.query.filter(
Tag.id == t.c.tag
)
else:
Tag_query = Tag.query
if crowd != 0:
crowd = (1==crowd)
Tag_query = Tag_query.filter_by(crowd_soured = crowd)
tags = Tag_query.all()
if only_ids:
return jsonify([tag.id for tag in tags]), status.HTTP_200_OK
else:
return jsonify([tag.serialize for tag in tags]), status.HTTP_200_OK | c009c0b84bbc825383dffb1141361dd1732b7b19 | 3,659,032 |
import os
import re
import gzip
import shutil
def gunzip(filename, targetdir):
"""Decompress a gzip-compressed file into a target directory.
Args:
filename: Full path to gzip file.
targetdir: Directory to decompress file into.
Returns:
The output file name.
Raises:
FileNotFoundError: `filename` does not exist.
"""
# We delete the .gz suffix and put the decompressed file into `targetdir`.
if not os.path.isfile(filename):
raise FileNotFoundError(f"File '{filename}' does not exist.")
targetfile = os.path.join(
targetdir, re.sub('\\.gz$', '', os.path.basename(filename))
)
cprint(f"Decompressing '{filename}'...", 'yellow')
try:
with open(targetfile, 'xb') as o, gzip.open(filename, 'rb') as i:
shutil.copyfileobj(i, o)
except Exception:
# Clean up target file.
if os.path.isfile(targetfile):
cprint(f"Removing file '{targetfile}'...", 'red')
os.remove(targetfile)
raise
assert targetfile
assert os.path.isfile(targetfile)
cprint(f"Successfully created file '{targetfile}'.", 'green')
return targetfile | 39f3eec7018fcfc95f3a825ee33cec5387b006ed | 3,659,033 |
def get_accept_languages(accept):
"""Returns a list of languages, by order of preference, based on an
HTTP Accept-Language string. See RFC 2616
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html) for specification.
"""
langs = parse_http_accept_header(accept)
for index, lang in enumerate(langs):
langs[index] = lang_in_gettext_format(lang)
return langs | ad329605cd0101e61c2c21aa42f2c81a84db771b | 3,659,034 |
def get_princ_axes_xyz(tensor):
"""
Gets the principal stress axes from a stress tensor.
Modified from beachball.py from ObsPy, written by Robert Barsch.
That code is modified from Generic Mapping Tools (gmt.soest.hawaii.edu)
Returns 'PrincipalAxis' classes, which have attributes val, trend, plunge
Returns T, N, P
"""
tensor = np.array(tensor)
(D, V) = sorted_eigens(tensor)
pl = np.arcsin( -V[2] ) # 2
az = np.arctan2( V[0], -V[1] ) # 0 # 1
for i in range(0, 3):
if pl[i] <= 0:
pl[i] = -pl[i]
az[i] += np.pi
if az[i] < 0:
az[i] += 2 * np.pi
if az[i] > 2 * np.pi:
az[i] -= 2 * np.pi
pl *= 180 / np.pi
az *= 180 / np.pi
T = PrincipalAxis( D[0], az[0], pl[0] ) # 0 0 0
N = PrincipalAxis( D[1], az[1], pl[1] )
P = PrincipalAxis( D[2], az[2], pl[2] ) # 2 2 2
return(T, N, P) | e9285464e17eb987ebfd21c8e066ff745a856dc1 | 3,659,035 |
def extractYoushoku(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if 'The Other World Dining Hall' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'The Other World Dining Hall', vol, chp, frag=frag, postfix=postfix)
return False | 686daef4d594e0e53779be48d7c49a525cabe4ee | 3,659,036 |
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
Returns
-------
pandas DataFrame with the diameters as the index and the Mie results in the different columns
total_extinction_coefficient: this takes the sum of all particle cross-sections of the particular diameter in a cubic
meter. This is in principle the AOD of an L
"""
diam = np.asarray(diam)
extinction_efficiency = np.zeros(diam.shape)
scattering_efficiency = np.zeros(diam.shape)
absorption_efficiency = np.zeros(diam.shape)
extinction_crossection = np.zeros(diam.shape)
scattering_crossection = np.zeros(diam.shape)
absorption_crossection = np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = pd.DataFrame(index=diam)
out['extinction_efficiency'] = pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = pd.Series(extinction_crossection, index=diam)
out['scattering_crossection'] = pd.Series(scattering_crossection, index=diam)
out['absorption_crossection'] = pd.Series(absorption_crossection, index=diam)
return out, angular_scattering_natural | 4ce8fa518477c3eb38816d8f441207716b3a90df | 3,659,037 |
from typing import Tuple
def load_config_dict(pipette_id: str) -> Tuple[
'PipetteFusedSpec', 'PipetteModel']:
""" Give updated config with overrides for a pipette. This will add
the default value for a mutable config before returning the modified
config value.
"""
override = load_overrides(pipette_id)
model = override['model']
config = fuse_specs(model)
if 'quirks' not in override.keys():
override['quirks'] = {key: True for key in config['quirks']}
for top_level_key in config.keys():
if top_level_key != 'quirks':
add_default(config[top_level_key]) # type: ignore
config.update(override) # type: ignore
return config, model | 485db2aad493eda30e6dad07b3d6c9413bc5c3c8 | 3,659,038 |
def ErrorAddEncKey(builder, encKey):
"""This method is deprecated. Please switch to AddEncKey."""
return AddEncKey(builder, encKey) | c39bb36b3923ca1a0e508b23ef84a6de130700a3 | 3,659,039 |
def _read_txs_from_file(f):
"""
Validate headers and read buy/sell transactions from the open file-like object 'f'.
Note: we use the seek method on f.
"""
ans = []
f.seek(0)
workbook = openpyxl.load_workbook(f)
sheet = workbook.active
all_contents = list(sheet.rows)
_validate_header(all_contents[0])
contents = all_contents[1:]
for row in contents:
item = _tx_from_gemini_row(row)
if item is not None:
ans.append(item)
return ans | 0c62c647a2ff1a797fb5e8593279bbf64bc0d495 | 3,659,040 |
from typing import Union
def get_generator_regulation_lower_term_4(data, trader_id, intervention) -> Union[float, None]:
"""Get L5RE term 4 in FCAS availability calculation"""
# Term parameters
enablement_min = get_effective_enablement_min(data, trader_id, 'L5RE')
energy_target = lookup.get_trader_solution_attribute(data, trader_id, '@EnergyTarget', float, intervention)
lower_slope_coefficient = get_lower_slope_coefficient(data, trader_id, 'L5RE')
# Ignore limit if slope coefficient = 0
if lower_slope_coefficient == 0:
return None
return 0 if (lower_slope_coefficient is None) else (energy_target - enablement_min) / lower_slope_coefficient | 626ab26f92feefea25777046c1fc37c4115f7be8 | 3,659,041 |
def count_parameters(model):
"""count model parameters"""
return sum(p.numel() for p in model.parameters() if p.requires_grad) | 5edcb3ee03794cb66f5986670c4825efab93a1d8 | 3,659,042 |
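A one-line sanity check, assuming PyTorch is installed: a Linear(10 -> 5) layer has 10*5 weights plus 5 biases:
import torch.nn as nn
print(count_parameters(nn.Linear(10, 5)))  # 55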
def string_rule_variable(label=None, params=None, options=None, public=True):
"""
Decorator to make a function into a string rule variable.
NOTE: add **kwargs argument to receive Rule as parameters
:param label: Label for Variable
:param params: Parameters expected by the Variable function
:param options: Options parameter to specify expected options for the variable.
The value used in the Condition IS NOT checked against this list.
:param public: Flag to identify if a variable is public or not
:return: Decorator function wrapper
"""
return _rule_variable_wrapper(StringType, label, params=params, options=options, public=public) | 3bd35ac2e27c58ee35f7e13bb359cb8240f8efda | 3,659,043 |
def detect_horizon_lines(image_thre, row, busbar, cell_size, thre=0.6, split=50, peak_interval=None, margin=None):
""" Detect horizontal edges by segmenting image into vertical splits
Parameters
---------
image_thre: array
Adaptive threshold of raw images
row: int
Number of rows of solar module
busbar: int
Number of busbars of a solar cell
cell_size: int
Output cell size in pixel
thre: float
Peak intensity above THRE will be set as 1.
Note that the edge's peak intensity should be lowest because edges are black
split: int
Number of splits
peak_interval: int
Distance between each peak.
Returns
-------
hline_abs_couple: array
Suppose a line is y=a*x+b.
Return 'a' and 'b' of a couple edges (top and bottom of a cell).
"""
#width = image_thre.shape[1]
#end = int(width / split)
#image_vsplits = np.hsplit(image_thre[:, :end * split], split) # vertical splits
#image_vsplits.append(image_thre[:, end * split:])
image_vsplits = split_img(image_thre, split=split, direction=1)
edge_y = []
inx_x = []
for inx, im_split in enumerate(image_vsplits):
#sum_split = np.sum(im_split, axis=1)
#sum_split = sum_split / np.max(sum_split)
#sum_split[sum_split > thre] = 1
#if peak_interval is None:
# peak_interval = int(cell_size / (busbar + 1) * 0.5)
#peak, _ = find_peaks(-1 * sum_split, distance=peak_interval)
peak = detect_peaks(im_split, 1, cell_size, busbar, thre, peak_interval, margin=margin)
if len(peak) >= row * (busbar + 1) - 1:
peak_new = [peak[0]]
for i in range(1, len(peak) - 1):
if np.abs(peak[i] - peak[i + 1]) < 15:
peak_mean = (peak[i] + peak[i + 1]) / 2
peak_new.append(peak_mean)
elif np.abs(peak[i] - peak[i - 1]) > 15:
peak_new.append(peak[i])
peak_new.append(peak[-1])
peak_new = np.array(peak_new)
peak_new_a = np.delete(peak_new, 0)
peak_new_b = np.delete(peak_new, -1)
peak_new_detect = peak_new[detectoutliers(np.abs(peak_new_a - peak_new_b), rate=0.5, option=1)]
if len(peak_new_detect) == (busbar + 1) * row + 1:
edge_y.append(peak_new_detect)
inx_mean = ((2 * inx + 1) * (image_thre.shape[1] / split) - 1) / 2
inx_x.append(inx_mean)
edge_y = np.array(edge_y)
hlines = list(zip(*edge_y))
hlines = np.array(hlines)
inx_x = np.array(inx_x)
# for lines in hlines:
# lines_new = self.detectoutliers(lines, option=0)
# while np.std(lines_new) > 10:
# lines_new = self.detectoutliers(lines, rate=1, option=0)
# hb_abs = [] # all lines including busbar
hb_abs = linear_regression(inx_x, hlines, outlier_filter=True)
hline_abs_couple = [] # all lines excluding busbar
# for horizonline in hlines:
# ab, _ = curve_fit(self.linear, inx_x, horizonline) # y = ax + b
# hb_abs.append(ab)
hline_abs_couple = [(hb_abs[(busbar + 1) * i], hb_abs[(busbar + 1) * (i + 1)]) for i in range(row)]
# hline_abs = [(hb_abs[(4+1)*i],hb_abs[(4+1)*(i+1)]) for i in range(6)]
# hline_abs = [(hb_abs[(self.busbar+2)*i],hb_abs[(self.busbar+2)*(i+1)-1]) for i in range(self.row)]
return hline_abs_couple | e8365b29829d6e1a71c4c9caefff221d9357b0a3 | 3,659,044 |
def countRoem(cards, trumpSuit=None):
"""Counts the amount of roem (additional points) in a list of cards
Args:
Returns:
Integer value how many points of roem are in the cards in total
"""
roem = 0
# Stuk
# Without a trumpSuit, stuk is impossible
if trumpSuit is not None:
#trumpKing = list(filter(lambda c: c.suit == trumpSuit and c.rank == 4, cards))
#trumpQueen = list(filter(lambda c: c.suit == trumpSuit and c.rank == 5, cards))
trumpKing = [card for card in cards if card.suit == trumpSuit and card.rank == 4]
trumpQueen = [card for card in cards if card.suit == trumpSuit and card.rank == 5]
if trumpKing and trumpQueen:
roem += 20
# Normal roem
# For each suit we check whether there are 3 cards in that suit, if so there is chance for roem
for i in range(4):
#cardsInSuit = list(filter(lambda c: c.suit == i, cards))
cardsInSuit = [card for card in cards if card.suit == i]
if len(cardsInSuit) >= 3:
cards = cardsInSuit
# We sort the list and check the difference between consecutive cards
cards.sort(key=lambda c: c.rank)
subtractList = []
for i in range(len(cards) - 1):
#subtract = abs(cards[i].roemRank - cards[i+1].roemRank)
subtract = abs(ROEMRANKS[cards[i].rank] - ROEMRANKS[cards[i + 1].rank])
subtractList.append(subtract)
# If more than 1 difference equals 1, we know at least 3 cards have consecutive ranks
#lenOfOnes = len(list(filter(lambda x: x == 1, subtractList)))
lenOfOnes = len([x for x in subtractList if x == 1])
if lenOfOnes == 2:
roem += 20
elif lenOfOnes == 3:
roem += 50
return roem | 31e2dbf346801fa81e5a5905a480f6d5b8e9ce1a | 3,659,045 |
from typing import Optional
def batch_to_space(
data: NodeInput,
block_shape: NodeInput,
crops_begin: NodeInput,
crops_end: NodeInput,
name: Optional[str] = None,
) -> Node:
"""Perform BatchToSpace operation on the input tensor.
BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions.
:param data: Node producing the data tensor.
:param block_shape: The sizes of the block of values to be moved.
:param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
:param crops_end: Specifies the amount to crop from the end along each axis of `data`.
:param name: Optional output node name.
:return: The new node performing a BatchToSpace operation.
"""
return _get_node_factory_opset2().create(
"BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end),
) | fe7004243e7c4a6dfd78b1f39df22ba7290c9244 | 3,659,046 |
def url_in(url):
""" Send a URL and I'll post it to Hive """
custom_json = {'url': url}
trx_id , success = send_notification(custom_json)
return trx_id, success | ecfcb02cdbd9050a5a305f38d9673d64b9b1d307 | 3,659,047 |
def login():
"""
Display a basic login form in order to log in a user
"""
if request.method == 'GET':
return render_template('login.html')
else:
try:
usr = User.query.get(request.form['user_id'])
if bcrypt.checkpw(request.form['user_password'].encode('utf-8'),usr.password):
login_user(usr, remember=True)
flash('Logged in successfully')
return redirect(session['next_url'])
except Exception as e:
print("Sorry this user don't exist")
print(e)
return render_template('login.html') | 37702dc290d627544d5714ed21d8804eaa00f354 | 3,659,048 |
def hflip(stream):
"""Flip the input video horizontally.
Official documentation: `hflip <https://ffmpeg.org/ffmpeg-filters.html#hflip>`__
"""
return FilterNode(stream, hflip.__name__).stream() | 140f7d4ceecee09e5f0ba7db9a68cee15e536ffa | 3,659,049 |
def get_diagonal_ripple_rainbows_2():
"""
Returns 15 diagonal ripple rainbows
Programs that use this function:
- Diagonal Ripple 3
- Diagonal Ripple 4
"""
rainbow01 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8]
]
rainbow02 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8]
]
rainbow03 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8]
]
rainbow04 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8]
]
rainbow05 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8]
]
rainbow06 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8]
]
rainbow07 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8]
]
rainbow08 = [
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H]
]
rainbow09 = [
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow10 = [
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow11 = [
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow12 = [
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow13 = [
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow14 = [
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow15 = [
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
return rainbow01, rainbow02, rainbow03, rainbow04, rainbow05, \
rainbow06, rainbow07, rainbow08, rainbow09, rainbow10, \
rainbow11, rainbow12, rainbow13, rainbow14, rainbow15 | ce917a063de580b2fcacfe2b59991585aefe30a4 | 3,659,050 |
def matrix_prod(A, B, display = False):
"""
Computes the matrix product of two matrices using array slicing and vector operations.
"""
if A.shape[1] != B.shape[0]:
raise ValueError("Dimensions not compatible.")
# Not allowed!?
#matrix = A.dot(B)
# Dotproduct of each A.row*B.clm
matrix = np.array([[np.sum(A[i,:]*B[:,j]) for j in range(B.shape[1])]
for i in range(A.shape[0])])
if display:
print(matrix)
return matrix | c38c3c3c9b1d2cc3edf6efb1997fe94a15c870ec | 3,659,051 |
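A quick check against NumPy's built-in matrix multiplication (numpy imported here since the snippet assumes it is already in scope):
import numpy as np
A = np.random.rand(3, 4)
B = np.random.rand(4, 2)
assert np.allclose(matrix_prod(A, B), A @ B)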
def remove_quat_discontinuities(rotations):
"""
Removing quat discontinuities on the time dimension (removing flips)
:param rotations: Array of quaternions of shape (T, J, 4)
:return: The processed array without quaternion inversion.
"""
rots_inv = -rotations
for i in range(1, rotations.shape[0]):
# Compare dot products
replace_mask = np.sum(rotations[i - 1: i] * rotations[i: i + 1], axis=-1) < np.sum(
rotations[i - 1: i] * rots_inv[i: i + 1], axis=-1)
replace_mask = replace_mask[..., np.newaxis]
rotations[i] = replace_mask * rots_inv[i] + (1.0 - replace_mask) * rotations[i]
return rotations | 7d3874f5c56f82f3a8951daef48ac115f7f8943a | 3,659,052 |
import glob
def compute_profile_from_frames(frames_str, ax, bt, box, N_bins=100, \
shift=None, verbose=False):
"""
Compute a density profile from a batch of xyz frames.
Input
=====
- frames_str: a regex containing frames in xyz format
- ax: axis along which to compute the profile
- bt: bead type
- box: box size, a (3, 3) matrix
- N_bins: number of bins
Output
======
- r: position vector
- pr: density profile vector
"""
frames = glob.glob(frames_str)
assert len(frames) != 0, "No xyz frames captured."
Nf = len(frames)
N = int(open(frames[0], "r").readline())
if verbose:
print(frames)
L = np.diag(box)
bins = np.linspace(0, L[ax], N_bins + 1)
dr = bins[1] - bins[0]
r = dr / 2.0 + bins[:-1]
Lsurf = L[list(set(range(3)).difference([ax]))] # cross-sectional surface
pr = np.zeros_like(r)
for frame in frames:
bl, X0 = read_xyz(frame)
if shift is not None:
assert len(shift) == 3, "Vector of shifting must be of size 3."
shift = np.array(shift)
X0 = X0 + shift
X0 = X0 % L
if bt == -1:
X = X0
else:
X = X0[bl == bt]
pr += np.histogram(X[:, ax], bins=bins)[0]
pr = pr / (dr * np.prod(Lsurf)) / Nf
return r, pr | 70702dbcf73f2a7e9894899ca20f81eadc3046fe | 3,659,053 |
import urllib
import requests
import json
def wikipedia_search(query, lang="en", max_result=1):
"""
https://www.mediawiki.org/wiki/API:Opensearch
"""
query = any2unicode(query)
params = {
"action":"opensearch",
"search": query,
"format":"json",
#"formatversion":2,
#"namespace":0,
"suggest":"true",
"limit": 10
}
urlBase = "https://{}.wikipedia.org/w/api.php?".format(lang)
url = urlBase + urllib.urlencode(any2utf8(params))
#logging.info(url)
r = requests.get(url)
jsonData = json.loads(r.content)
#logging.info(jsonData)
items = []
ret = {"query":query, "itemList":items}
for idx, label in enumerate(jsonData[1][0:max_result]):
description = jsonData[2][idx]
url = jsonData[3][idx]
item = {
"name": label,
"description":description,
"url": url,
}
items.append(item)
return ret | e88b50c11d78989e086417d15e91515d24151586 | 3,659,054 |
def group_result(result, func):
"""
:param result: A list of rows from the database: e.g. [(key, data1), (key, data2)]
:param func: the function to reduce the data e.g. func=median
:return: the data that is reduced. e.g. [(key, (data1+data2)/2)]
"""
data = {}
for key, value in result:
if key in data.keys():
data[key].append(value)
else:
data[key] = [value]
for key in data:
data[key] = func(data[key])
return data.items() | 7687521c216210badcda5ee54bd59a3bc6a234bd | 3,659,055 |
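A small usage sketch, reducing grouped values with the standard-library median:
from statistics import median
rows = [('a', 1), ('a', 3), ('b', 10)]
print(sorted(group_result(rows, median)))  # [('a', 2.0), ('b', 10)]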
import torch
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
# img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = img.transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim | 65159c8ce3a2df3cb09a6f1f318bb3374943e314 | 3,659,056 |
import uuid
def extractLogData(context):
"""
helper function to extract all important data from the web context.
:param context: the web.py context object
:return: a dictionary with all information for the logging.
"""
logData = {}
logData['ip'] = context.ip
logData['account'] = context.env.get('HTTP_RUCIO_ACCOUNT')
logData['appid'] = 'clients' # has to be changed, but atm no appid is sent with the clients
logData['clientref'] = context.env.get('HTTP_RUCIO_CLIENTREF')
logData['uri'] = context.method + ' ' + context.protocol + "://" + context.host + context.homepath + context.fullpath
logData['requestid'] = uuid()
logData['requestHeader'] = context.env
logData['responseHeader'] = ''
logData['httpCode'] = ''
logData['duration'] = ''
return logData | 5fb68d4f19dae0b7175a089dd1366cab0407152b | 3,659,057 |
def Backbone(backbone_type='ResNet50', use_pretrain=True):
"""Backbone Model"""
weights = None
if use_pretrain:
weights = 'imagenet'
def backbone(x_in):
if backbone_type == 'ResNet50':
return ResNet50(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
elif backbone_type == 'MobileNetV2':
return MobileNetV2(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
else:
raise TypeError('backbone_type error!')
return backbone | 23bc493e8306d5dc5dba33cd2f67de231cbb3e02 | 3,659,058 |
def start(ctx, vca_client, **kwargs):
"""
power on server and wait network connection availability for host
"""
# combine properties
obj = combine_properties(
ctx, kwargs=kwargs, names=['server'],
properties=[VCLOUD_VAPP_NAME, 'management_network'])
# get external
if obj.get('use_external_resource'):
ctx.logger.info('not starting server since an external server is '
'being used')
else:
vapp_name = get_vapp_name(ctx.instance.runtime_properties)
config = get_vcloud_config()
vdc = vca_client.get_vdc(config['vdc'])
vapp = vca_client.get_vapp(vdc, vapp_name)
_power_on_vm(ctx, vca_client, vapp, vapp_name)
if not _get_state(ctx=ctx, vca_client=vca_client):
return ctx.operation.retry(
message="Waiting for VM's configuration to complete",
retry_after=5) | 6e3e3a94095ef200e586f7dfdc7e117ae3ee375f | 3,659,059 |
def softplus(z):
"""Numerically stable version of log(1 + exp(z))."""
# see stabilizing softplus: http://sachinashanbhag.blogspot.com/2014/05/numerically-approximation-of-log-1-expy.html # noqa
mu = z.copy()
mu[z > 35] = z[z > 35]
mu[z < -10] = np.exp(z[z < -10])
mu[(z >= -10) & (z <= 35)] = np.log1p(np.exp(z[(z >= -10) & (z <= 35)]))
return mu | f683c1f2240d053c4ee2c24f64ff5576c0d9d32d | 3,659,060 |
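A quick sanity check against the naive formula in the numerically safe region:
import numpy as np
z = np.linspace(-5.0, 5.0, 11)
print(np.allclose(softplus(z), np.log1p(np.exp(z))))  # True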
from typing import Mapping
from typing import Hashable
from typing import Union
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import OrderedDict
from typing import Any
def merge_indexes(
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],
variables: Mapping[Hashable, Variable],
coord_names: Set[Hashable],
append: bool = False,
) -> "Tuple[OrderedDict[Any, Variable], Set[Hashable]]":
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace = {} # Dict[Any, Variable]
vars_to_remove = [] # type: list
error_msg = "{} is not the name of an existing variable."
for dim, var_names in indexes.items():
if isinstance(var_names, str) or not isinstance(var_names, Sequence):
var_names = [var_names]
names, codes, levels = [], [], [] # type: (list, list, list)
current_index_variable = variables.get(dim)
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
if (
current_index_variable is not None
and var.dims != current_index_variable.dims
):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims)
)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
try:
current_codes = current_index.codes
except AttributeError:
# for pandas<0.24
current_codes = current_index.labels
names.extend(current_index.names)
codes.extend(current_codes)
levels.extend(current_index.levels)
else:
names.append("%s_level_0" % dim)
cat = pd.Categorical(current_index.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else:
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
names.append(n)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(levels, codes, names=names)
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = OrderedDict(
[(k, v) for k, v in variables.items() if k not in vars_to_remove]
)
new_variables.update(vars_to_replace)
new_coord_names = coord_names | set(vars_to_replace)
new_coord_names -= set(vars_to_remove)
return new_variables, new_coord_names | b893d118312697d1995a0a42bbff8354b73ca642 | 3,659,061 |
def least_squares(m, n):
""" Create a least squares problem with m datapoints and n dimensions """
A = np.random.randn(m, n)
_x = np.random.randn(n)
b = A.dot(_x)
x = cp.Variable(n)
return (x, cp.Problem(cp.Minimize(cp.sum_squares(A * x - b) + cp.norm(x, 2)))) | 21b3b4577ec232f6e74d1f096946d0923f867cf7 | 3,659,062 |
import sys
def getRef(refFile):
"""Returns a genome reference."""
refDict={}
hdList=[]
ref=''
num=0
try:
f=open(refFile)
except IOError:
errlog.error('Cannot find reference file ' +refFile+'. Please check pathname.')
sys.exit('Cannot find reference file '+refFile+'. Please check pathname.')
i=f.readline()
head=i[1:51].rstrip()
i=f.readline().rstrip()
while i:
if i[0]!='>':
ref+=i.rstrip()
i=f.readline()
else:
if head in hdList:
num+=1
head=head+str(num)
ref=ref.upper()
for l in 'RYMKSWHBVD':
ref=ref.replace(l,'N')
refDict[head]=ref
hdList.append(head)
head=i[1:51].rstrip()
i=f.readline()
ref=''
ref=ref.upper()
for l in 'RYMKSWHBVD':
ref=ref.replace(l,'N')
refDict[head]=ref
errlog.debug('Reference file successfully parsed.')
return refDict | bd6f149662077acff88f42029c87deab08e92331 | 3,659,063 |
import os
def cleanup_path(paths, removedir=True):
""" remove unreable files and directories from the input path collection,
skipped include two type of elements: unwanted directories if removedir is True
or unaccessible files/directories
"""
checked = []
skipped = []
for ele in paths:
ele = os.path.abspath(ele)
if os.path.exists(ele) and os.access(ele, os.R_OK):
if os.path.isdir(ele) and removedir:
skipped.append(ele)
else:
checked.append(ele)
else:
skipped.append(ele)
return checked, skipped | 36105a4617ea0cfffc870dd083540f81aaf56079 | 3,659,064 |
def expand_amn(a, kpoints, idx, Rvectors, nproj_atom=None):
"""
Expand the projections matrix by translations of the orbitals
Parameters
----------
a : ndarray, shape (nkpts, nbnds, nproj)
kpoints : ndarray, shape (nkpts, 3)
idx : ndarray
indices of translated orbitals
Rvectors: ndarray
translation vectors for the orbitals
nproj_atom: ndarray, optional
number of projections on each atom, with idx and Rvectors now describing
atoms instead of orbitals
"""
assert len(Rvectors) == len(idx)
if nproj_atom is not None:
assert len(nproj_atom) == len(idx)
idx_new = []
Rvectors_new = []
for iatom, i in enumerate(idx):
offset = np.sum(nproj_atom[:i])
for j in range(nproj_atom[i]):
idx_new.append(offset+j)
Rvectors_new.append(Rvectors[iatom])
idx = idx_new
Rvectors = Rvectors_new
nkpts, nbnds, nproj = a.shape
a1 = np.zeros((nkpts, nbnds, len(idx)), dtype=complex)
k_dot_R = np.einsum('ki,ri->kr', kpoints, Rvectors)
exp_factors = np.exp(-1j * 2*np.pi * k_dot_R)
a1 = a[:, :, idx] * exp_factors[:, np.newaxis, :]
return a1 | d68a7cd4cb019b2d516305d0b6a2b45f6a422ba8 | 3,659,065 |
def combine_basis_vectors(weights, vectors, default_value=None, node_num=None):
"""
Combine basis vectors using ``weights`` as the Manning's n value for each
    basis vector. If a ``default_value`` is set then all nodes without data
are set to the ``default_value``.
:type weights: :class:`numpy.ndarray`
:param weights: array of size (num_of_basis_vec, 1)
:type vectors: list of dicts OR :class:`numpy.ndarray` of size (node_num,
num_of_basis_vec)
:param vectors: basis vectors
    :returns: an array of size (node_num, 1) containing the Manning's n value at
        all nodes in numerical order, or a dictionary
"""
if len(weights) != len(vectors):
raise LenError('weights, vectors', 'dimensions do not match')
    if isinstance(vectors[0], np.ndarray):
        return combine_bv_array(weights, vectors)
elif default_value and node_num:
return dict_to_array(add_dict(vectors, weights)[0], default_value,
node_num)
else:
return add_dict(vectors, weights)[0] | 50a0cc5ba8ad88a480fc589f6fbe184548700485 | 3,659,066 |
from typing import List
from typing import Tuple
from typing import Any
import networkx as nx
def _prepare_data_for_node_classification(
graph: nx.Graph, seed_node: int
) -> List[Tuple[Any, Any]]:
"""
Position seed node as the first node in the data.
TensorFlow GNN has a convention whereby the node to be classified, the "seed node",
is positioned first in the component. This is for use with layers such as
`tfgnn.keras.layers.ReadoutFirstNode` which extracts the first node from a component.
"""
seed_data = graph.nodes(data=True)[seed_node]
data = [(seed_data["features"], seed_data["label"])]
    data += [
        (node_data["features"], node_data["label"])
        for node, node_data in graph.nodes(data=True)
        if node != seed_node
    ]
return data | 3ed718e583d9e96b2c5bd28e5640c36e5e009065 | 3,659,067 |
import logging
def init():
""" Init the application and add routes """
logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
level=logging.DEBUG)
global theconfig
theconfig = get_config()
global rc
rc = init_redis(theconfig)
app = default_app()
return app | c9c8694bf389fb52572ccd43a83d7c410a3538a2 | 3,659,068 |
import numpy as np
def norm_potential(latitude, longitude, h, refell, lmax):
"""
Calculates the normal potential at a given latitude and height
Arguments
---------
latitude: latitude in degrees
longitude: longitude in degrees
    h: height above reference ellipsoid in meters
refell: reference ellipsoid name
lmax: maximum spherical harmonic degree
Returns
-------
U: normal potential at height h
dU_dr: derivative of normal potential with respect to radius
dU_dtheta: derivative of normal potential with respect to theta
"""
#-- get ellipsoid parameters for refell
ellip = ref_ellipsoid(refell)
a = np.float128(ellip['a'])
ecc1 = np.float128(ellip['ecc1'])
GM = np.float128(ellip['GM'])
J2 = np.float128(ellip['J2'])
#-- convert from geodetic latitude to geocentric latitude
latitude_geodetic_rad = (np.pi*latitude/180.0).astype(np.float128)
longitude_rad = (np.pi*longitude/180.0).astype(np.float128)
N = a/np.sqrt(1.0 - ecc1**2.0*np.sin(latitude_geodetic_rad)**2.0)
X = (N + h) * np.cos(latitude_geodetic_rad) * np.cos(longitude_rad)
Y = (N + h) * np.cos(latitude_geodetic_rad) * np.sin(longitude_rad)
Z = (N * (1.0 - ecc1**2.0) + h) * np.sin(latitude_geodetic_rad)
rr = np.sqrt(X**2.0 + Y**2.0 + Z**2.0)
latitude_geocentric = np.arctan(Z / np.sqrt(X**2.0 + Y**2.0))
#-- calculate even zonal harmonics
n = np.arange(2, 12+2, 2, dtype=np.float128)
J2n = cosine_even_zonals(J2, ecc1, n/2.0)
#-- normalized cosine harmonics: Cn = -Jn/np.sqrt(2.0*n+1.0)
#-- J2 = 0.108262982131e-2
C_2 = -J2n[0]/np.sqrt(5.0)
#-- J4 = -0.237091120053e-5
C_4 = -J2n[1]/np.sqrt(9.0)
#-- J6 = 0.608346498882e-8
C_6 = -J2n[2]/np.sqrt(13.0)
#-- J8 = -0.142681087920e-10
C_8 = -J2n[3]/np.sqrt(17.0)
#-- J10 = 0.121439275882e-13
C_10 = -J2n[4]/np.sqrt(21.0)
#-- J12 = 0.205395070709e-15
C_12 = -J2n[5]/np.sqrt(25.0)
#-- calculate legendre polynomials at latitude and their first derivative
Pl,dPl = legendre_polynomials(lmax, np.sin(latitude_geocentric),
ASTYPE=np.float128)
#-- normal potentials and derivatives
U = (GM/rr) * (1.0 + (a/rr)**2.*C_2*Pl[2,:] + (a/rr)**4.*C_4*Pl[4,:] + \
(a/rr)**6.*C_6*Pl[6,:] + (a/rr)**8.*C_8*Pl[8,:] + \
(a/rr)**10.*C_10*Pl[10,:] + (a/rr)**12.*C_12*Pl[12,:])
dU_dr = GM * (-1.0 / rr**2.0 - 3.0*(a**2.0/rr**4.0)*C_2*Pl[2,:] - \
5.0*(a**4.0/rr**6.0)*C_4*Pl[4,:] -7.0*(a**6.0/rr**8.0)*C_6*Pl[6,:] - \
9.0*(a**8.0/rr**10.)*C_8*Pl[8,:] -11.*(a**10./rr**12.)*C_10*Pl[10,:] - \
13.*(a**12./rr**14.)*C_12*Pl[12,:])
dU_dtheta = (GM/rr) * (1.0 + (a/rr)**2.0*C_2*dPl[2,:] + \
(a/rr)**4.0*C_4*dPl[4,:] + (a/rr)**6.0*C_6*dPl[6,:] + \
(a/rr)**8.0*C_8*dPl[8,:] + (a/rr)**10.0*C_10*dPl[10,:] + \
(a/rr)**12.0*C_12*dPl[12,:])
#-- return the potentials
return (U, dU_dr, dU_dtheta) | 5fc9f26a206c4fced5ebd434c84dda26621c08dc | 3,659,069 |
import os
import glob
def _findPlugInfo(rootDir):
""" Find every pluginInfo.json files below the root directory.
:param str rootDir: the search start from here
:return: a list of files path
:rtype: [str]
"""
files = []
for root, dirnames, filenames in os.walk(rootDir):
files.extend(glob.glob(root + '/plugInfo.json'))
return files | d3367d8f71598d1fa1303baec0d4f6803b1293b2 | 3,659,070 |
from collections import defaultdict
def get_pp_gene_chains(chain_class_file, v=False):
"""Get gene: pp chains dict."""
gene_to_pp_chains = defaultdict(list) # init the dict
f = open(chain_class_file, "r") # open file with classifications
f.__next__() # skip header
for line in f:
line_data = line.rstrip().split("\t")
        # line contains the following fields:
        # gene orthologs paralogs trans p_pseudogenes
trans = line_data[0]
# proc_pseudogene chains are in the 4th field
pp_genes_field = line_data[4]
if pp_genes_field == "0":
            # if 0 -> no ppgene chains -> skip
continue
# parse comma-separated string and save to dict
pp_genes = [int(x) for x in pp_genes_field.split(",") if x != ""]
gene_to_pp_chains[trans] = pp_genes
f.close()
if v:
verbose(f"Extracted {len(gene_to_pp_chains)} genes with proc pseudogenes")
return gene_to_pp_chains | d09fe5f7e8aaed0b8aa46593931b3cda655f56e3 | 3,659,071 |
import os
import fnmatch
def rec_search(wildcard):
"""
Traverse all subfolders and match files against the wildcard.
Returns:
A list of all matching files absolute paths.
"""
matched = []
for dirpath, _, files in os.walk(os.getcwd()):
fn_files = [os.path.join(dirpath, fn_file) for fn_file
in fnmatch.filter(files, wildcard)]
matched.extend(fn_files)
return matched | 47e8bd48956de87a72c996795f94b69979bf992f | 3,659,072 |
def skip_object(change_mode, change):
"""
If `Mode` is `change`: we do not care about the `Conditions`
Else:
If `cfn` objects:
- We can omit the `Conditions`, objects will be involed when `Mode` is `provision` or `destroy`. (Original design. Backward compatibility.)
- In case `Conditions` is declared, objects will be involed when `Mode` matches with `Conditions`.
If `aws` objects: we must declare `Conditions` and match with `Mode`, or else the engine will skip that Object/Block.
OR
If `Mode` is `change`: we do not care about the `Conditions`
Else:
If we omit the `Conditions`:
- Only `cfn` objects are involed when `Mode` is `provision` or `destroy`. (Original design. Backward compatibility.)
- Others will be skipped.
Else:
Objects will be involed when `Mode` matches with `Conditions`.
Return:
- `True` means skipped
- `False` means involved
"""
if (change_mode!=CHANGE_MODE_CHANGE):
if ('Conditions' not in change):
if (change['Object']==STR_CFN) and (change_mode in [CHANGE_MODE_PROVISION,CHANGE_MODE_DESTROY]):
return False
return True
elif (change_mode not in change['Conditions']):
return True
return False | a80365acc6f3390818f4c56a44ad4923f771fcee | 3,659,073 |
def rootfinder(*args):
"""
rootfinder(str name, str solver, dict:SX rfp, dict opts) -> Function
Create a solver for rootfinding problems Takes a function where one of the
rootfinder(str name, str solver, dict:MX rfp, dict opts) -> Function
rootfinder(str name, str solver, Function f, dict opts) -> Function
> rootfinder(str name, str solver, dict:SX rfp, dict opts)
------------------------------------------------------------------------
    Create a solver for rootfinding problems. Takes a function where one of the
    inputs is unknown and one of the outputs is a residual function that is
    always zero, defines a new function where the unknown input has been
    replaced by a guess for the unknown and the residual output has been
    replaced by the calculated value for the input.
For a function [y0, y1, ...,yi, .., yn] = F(x0, x1, ..., xj, ..., xm), where
xj is unknown and yi=0, defines a new function [y0, y1, ...,xj, .., yn] =
G(x0, x1, ..., xj_guess, ..., xm),
    xj and yi must have the same dimension and d(yi)/d(xj) must be invertible.
By default, the first input is unknown and the first output is the residual.
General information
===================
>List of available options
+------------------+-----------------+------------------+------------------+
| Id | Type | Description | Used in |
+==================+=================+==================+==================+
| common_options | OT_DICT | Options for | casadi::OracleFu |
| | | auto-generated | nction |
| | | functions | |
+------------------+-----------------+------------------+------------------+
| constraints | OT_INTVECTOR | Constrain the | casadi::Rootfind |
| | | unknowns. 0 | er |
| | | (default): no | |
| | | constraint on | |
| | | ui, 1: ui >= | |
| | | 0.0, -1: ui <= | |
| | | 0.0, 2: ui > | |
| | | 0.0, -2: ui < | |
| | | 0.0. | |
+------------------+-----------------+------------------+------------------+
| error_on_fail | OT_BOOL | When the | casadi::Rootfind |
| | | numerical | er |
| | | process returns | |
| | | unsuccessfully, | |
| | | raise an error | |
| | | (default false). | |
+------------------+-----------------+------------------+------------------+
| implicit_input | OT_INT | Index of the | casadi::Rootfind |
| | | input that | er |
| | | corresponds to | |
| | | the actual root- | |
| | | finding | |
+------------------+-----------------+------------------+------------------+
| implicit_output | OT_INT | Index of the | casadi::Rootfind |
| | | output that | er |
| | | corresponds to | |
| | | the actual root- | |
| | | finding | |
+------------------+-----------------+------------------+------------------+
| jacobian_functio | OT_FUNCTION | Function object | casadi::Rootfind |
| n | | for calculating | er |
| | | the Jacobian | |
| | | (autogenerated | |
| | | by default) | |
+------------------+-----------------+------------------+------------------+
| linear_solver | OT_STRING | User-defined | casadi::Rootfind |
| | | linear solver | er |
| | | class. Needed | |
| | | for | |
| | | sensitivities. | |
+------------------+-----------------+------------------+------------------+
| linear_solver_op | OT_DICT | Options to be | casadi::Rootfind |
| tions | | passed to the | er |
| | | linear solver. | |
+------------------+-----------------+------------------+------------------+
| monitor | OT_STRINGVECTOR | Set of user | casadi::OracleFu |
| | | problem | nction |
| | | functions to be | |
| | | monitored | |
+------------------+-----------------+------------------+------------------+
| specific_options | OT_DICT | Options for | casadi::OracleFu |
| | | specific auto- | nction |
| | | generated | |
| | | functions, | |
| | | overwriting the | |
| | | defaults from | |
| | | common_options. | |
| | | Nested | |
| | | dictionary. | |
+------------------+-----------------+------------------+------------------+
>Input scheme: casadi::RootfinderInput (ROOTFINDER_NUM_IN = 2)
+---------------+-------+---------------------------------+
| Full name | Short | Description |
+===============+=======+=================================+
| ROOTFINDER_X0 | x0 | Initial guess for the solution. |
+---------------+-------+---------------------------------+
| ROOTFINDER_P | p | Parameters. |
+---------------+-------+---------------------------------+
>Output scheme: casadi::RootfinderOutput (ROOTFINDER_NUM_OUT = 1)
+--------------+-------+--------------------------------------+
| Full name | Short | Description |
+==============+=======+======================================+
| ROOTFINDER_X | x | Solution to the system of equations. |
+--------------+-------+--------------------------------------+
List of plugins
===============
- kinsol
- fast_newton
- nlpsol
- newton
Note: some of the plugins in this list might not be available on your
system. Also, there might be extra plugins available to you that are not
listed here. You can obtain their documentation with
Rootfinder.doc("myextraplugin")
--------------------------------------------------------------------------------
kinsol
------
KINSOL interface from the Sundials suite
>List of available options
+---------------------------+-----------------+----------------------------+
| Id | Type | Description |
+===========================+=================+============================+
| abstol | OT_DOUBLE | Stopping criterion |
| | | tolerance |
+---------------------------+-----------------+----------------------------+
| disable_internal_warnings | OT_BOOL | Disable KINSOL internal |
| | | warning messages |
+---------------------------+-----------------+----------------------------+
| exact_jacobian | OT_BOOL | Use exact Jacobian |
| | | information |
+---------------------------+-----------------+----------------------------+
| f_scale | OT_DOUBLEVECTOR | Equation scaling factors |
+---------------------------+-----------------+----------------------------+
| iterative_solver | OT_STRING | gmres|bcgstab|tfqmr |
+---------------------------+-----------------+----------------------------+
| linear_solver_type | OT_STRING | dense|banded|iterative|use |
| | | r_defined |
+---------------------------+-----------------+----------------------------+
| lower_bandwidth | OT_INT | Lower bandwidth for banded |
| | | linear solvers |
+---------------------------+-----------------+----------------------------+
| max_iter | OT_INT | Maximum number of Newton |
| | | iterations. Putting 0 sets |
| | | the default value of |
| | | KinSol. |
+---------------------------+-----------------+----------------------------+
| max_krylov | OT_INT | Maximum Krylov space |
| | | dimension |
+---------------------------+-----------------+----------------------------+
| pretype | OT_STRING | Type of preconditioner |
+---------------------------+-----------------+----------------------------+
| strategy | OT_STRING | Globalization strategy |
+---------------------------+-----------------+----------------------------+
| u_scale | OT_DOUBLEVECTOR | Variable scaling factors |
+---------------------------+-----------------+----------------------------+
| upper_bandwidth | OT_INT | Upper bandwidth for banded |
| | | linear solvers |
+---------------------------+-----------------+----------------------------+
| use_preconditioner | OT_BOOL | Precondition an iterative |
| | | solver |
+---------------------------+-----------------+----------------------------+
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
fast_newton
-----------
Implements simple newton iterations to solve an implicit function.
>List of available options
+------------+-----------+-------------------------------------------------+
| Id | Type | Description |
+============+===========+=================================================+
| abstol | OT_DOUBLE | Stopping criterion tolerance on ||g||__inf) |
+------------+-----------+-------------------------------------------------+
| abstolStep | OT_DOUBLE | Stopping criterion tolerance on step size |
+------------+-----------+-------------------------------------------------+
| max_iter | OT_INT | Maximum number of Newton iterations to perform |
| | | before returning. |
+------------+-----------+-------------------------------------------------+
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
nlpsol
------
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
newton
------
Implements simple newton iterations to solve an implicit function.
>List of available options
+-----------------+-----------+--------------------------------------------+
| Id | Type | Description |
+=================+===========+============================================+
| abstol | OT_DOUBLE | Stopping criterion tolerance on max(|F|) |
+-----------------+-----------+--------------------------------------------+
| abstolStep | OT_DOUBLE | Stopping criterion tolerance on step size |
+-----------------+-----------+--------------------------------------------+
| max_iter | OT_INT | Maximum number of Newton iterations to |
| | | perform before returning. |
+-----------------+-----------+--------------------------------------------+
| print_iteration | OT_BOOL | Print information about each iteration |
+-----------------+-----------+--------------------------------------------+
--------------------------------------------------------------------------------
Joel Andersson
> rootfinder(str name, str solver, dict:MX rfp, dict opts)
> rootfinder(str name, str solver, Function f, dict opts)
------------------------------------------------------------------------
"""
return _casadi.rootfinder(*args) | 13ac40736849b6b4240dc5d22ad36aa472583bcb | 3,659,074 |
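A minimal sketch of the Function-based overload described above (assumes CasADi is installed; the residual x**2 - p and the 'newton' plugin are illustrative choices):

    import casadi as ca
    x = ca.SX.sym('x')
    p = ca.SX.sym('p')
    g = ca.Function('g', [x, p], [x**2 - p])    # first input is the unknown, first output the residual
    solver = ca.rootfinder('solver', 'newton', g)
    print(solver(1.0, 4.0))                     # root of x**2 - 4 from the guess 1.0, i.e. 2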
def f18(x, rotation=None, shift=None, shuffle=None):
"""
Hybrid Function 8 (N=5)
Args:
x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
rotation (matrix): Optional rotation matrix. If None (default), the
official matrix from the benchmark suite will be used.
shift (array): Optional shift vector. If None (default), the official
vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
official permutation vector from the benchmark suite will be used.
"""
nx = len(x)
if rotation is None:
rotation = transforms.rotations[nx][17]
if shift is None:
shift = transforms.shifts[17][:nx]
if shuffle is None:
shuffle = transforms.shuffles[nx][7]
x_transformed = np.matmul(rotation, x - shift)
x_parts = _shuffle_and_partition(x_transformed, shuffle, [0.2, 0.2, 0.2, 0.2, 0.2])
y = basic.high_conditioned_elliptic(x_parts[0])
y += basic.ackley(x_parts[1])
y += basic.rastrigin(x_parts[2])
y += basic.h_g_bat(x_parts[3])
y += basic.discus(x_parts[4])
return y + 1800.0 | d76b4aef256bdc72bc077adefda5e7a93f8ea500 | 3,659,075 |
def shake_256_len(data: bytes, length: int) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.shake_256_len(length), data) | c59f1a224264649573d53b67f170c7238b5a9840 | 3,659,076 |
import typing as tp
def rgb_to_cmyk(color_values: tp.List[float]) -> tp.List[float]:
"""Converts list of RGB values to CMYK.
:param color_values: (list) 3-member RGB color value list
:return: (list) 4-member CMYK color value list
"""
if color_values == [0.0, 0.0, 0.0]:
return [0.0, 0.0, 0.0, 1.0]
r, g, b = color_values
c = 1.0 - r
m = 1.0 - g
y = 1.0 - b
min_cmy = min(c, m, y)
c = (c - min_cmy) / (1 - min_cmy)
m = (m - min_cmy) / (1 - min_cmy)
y = (y - min_cmy) / (1 - min_cmy)
k = min_cmy
return [c, m, y, k] | f0054c92e862c5f0a8b09f94c115c03150b3363b | 3,659,077 |
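A quick check of the conversion above with illustrative values:

    print(rgb_to_cmyk([1.0, 0.0, 0.0]))   # pure red -> [0.0, 1.0, 1.0, 0.0]
    print(rgb_to_cmyk([0.0, 0.0, 0.0]))   # black    -> [0.0, 0.0, 0.0, 1.0]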
def check_family(matrix):
"""Check the validity of a family matrix for the vine copula.
Parameters:
----------
matrix : array
The pair-copula families.
Returns
-------
matrix : array
The corrected matrix.
"""
# TODO: check if the families are in the list of copulas
matrix = check_matrix(matrix)
matrix = check_triangular(matrix, k=1)
dim = matrix.shape[0]
for i in range(dim):
for j in range(i):
if isinstance(matrix[i, j], str):
matrix[i, j] = int(
R_VINECOPULA.BiCopName(matrix[i, j], False)[0])
elif isinstance(matrix[i, j], np.integer):
pass
matrix = matrix.astype(int)
return matrix | 49ef0a621c8c7f4063ff6e886ed5455bfa760213 | 3,659,078 |
import re
def parse_pgt_programmearray(url):
"""
Parse filter.js programmearray for pgt information
:param url: base url for timetabling system
:return: pgt programme name to id dict
"""
# get filter.js file
source = get_filterjs(url)
name_to_id = {}
# e.g. programmearray[340] [0] = "MSc Finance and Investment (Business Analytics)/F/02 - MSc Finance and
# Investment (Business Analytics)";
matches = re.findall(r'programmearray\[(\d{1,3})\] \[0\] = "(.*)";\s+'
r'programmearray\[\1\] \[1\] = ".*";\s+'
r'programmearray\[\1\] \[2\] = "(PGT/.*)"', source)
for match in matches:
# match e.g. ('0', 'MA Applied Linguistics/F/01 - EG04 Applied Linguistics', 'PGT/C1014/C7PAPLST/F/01')
name_to_id[match[1]] = match[2]
return name_to_id | b7901c37cdb931dce75a77422394f98b5e3898d1 | 3,659,079 |
def distance_calc(x1, y1, x2, y2):
"""Calculates distance between two points"""
return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5 | 4c0001d90d38371a5336e8163fbf63b3d6e95834 | 3,659,080 |
import requests
import pandas as pd
def LoadNasaData(lat, lon, show= False, selectparms= None):
""" Execute a request from NASA API for 10 years of atmospheric data
required to prepare daily statistical data used in Solar Insolation
calculations """
    cmd = formulateRequest(lat, lon, selectparms)
jdi = requests.get(cmd[0]).json()
cols = cmd[1]
df = pd.json_normalize(jdi['features'][0]['properties']['parameter'][cols[0]]).T
df.index = pd.to_datetime(df.index)
df.rename(columns={0: cols[0]}, inplace= True)
for c in cols[1:]:
dfc = pd.json_normalize(jdi['features'][0]['properties']['parameter'][c]).T
dfc.index = pd.to_datetime(df.index)
dfc.rename(columns={0: c}, inplace= True)
df = df.join(dfc)
df['DayofYear'] = df.index.dayofyear
df = df[df['DayofYear'] != 366] #drop a day for leap years
atmo_dict = dict()
dg = df.groupby('DayofYear')
for col in cols:
dp = pd.DataFrame(dg[col].min())
dp.rename(columns={col: 'Min'}, inplace= True)
atmo_dict[col] = dp
dp = pd.DataFrame(dg[col].max())
dp.rename(columns={col: 'Max'}, inplace= True)
atmo_dict[col] = atmo_dict[col].join(dp)
dp = pd.DataFrame(dg[col].mean())
dp.rename(columns={col: 'S-Mean'}, inplace= True)
atmo_dict[col] = atmo_dict[col].join(dp)
dp = pd.DataFrame(dg[col].std())
dp.rename(columns={col: 'STDV'}, inplace= True)
atmo_dict[col] = atmo_dict[col].join(dp)
return atmo_dict | a2f61d20f46feee5d86bad2525c3bc20c3e00e14 | 3,659,081 |
from datetime import date, datetime
def mmyy_date_slicer(date_str):
"""Return start and end point for given date in mm-yy format.
:param date_str: date in mmyy format, i.e. "1222" or "0108".
:return: start and end date string for a given mmyy formatted date string
"""
# Initialize output
start = ""
end = ""
if mmyy_valid_date(date_str):
today = date.today()
# Check if date is in the future
dt_check = datetime.strptime(date_str, "%m%y")
if dt_check.date() <= today:
# Determine the start date string
datetime_object = datetime.strptime(date_str[0:2], "%m")
mo = datetime_object.strftime("%b")
yyyy = f"20{date_str[2:]}"
start = f'1 {mo}, {yyyy}'
# Determine the end date string.
mm = int(date_str[0:2])
if mm == today.month:
pass
elif mm == 12:
end = f"1 Jan, {int(yyyy)+1}"
else:
mm1 = int(date_str[0:2]) + 1
datetime_object = datetime.strptime(f"{mm1}", "%m")
mo1 = datetime_object.strftime("%b")
end = f'1 {mo1}, {yyyy}'
else:
# print(f'date in the future! > {date_str}')
return "", ""
else:
# print(f'date malformed! > {date_str}')
return "", ""
return start, end | f1c7f74f1824d5b4c410f3d8cc6ade15571fe3ca | 3,659,082 |
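Illustrative calls for the helper above (the exact output depends on the current date, since future months and the current month are handled specially; mmyy_valid_date is the module-level validator the function assumes):

    print(mmyy_date_slicer("0121"))   # -> ('1 Jan, 2021', '1 Feb, 2021') when not run in January
    print(mmyy_date_slicer("1350"))   # invalid month -> ('', '')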
import typing
def constant_arg(name: str):
"""
Promises that the given arg will not be modified
Only affects mutable data types
Removes the need to copy the data during inlining
"""
def annotation(target: typing.Callable):
optimiser = _schedule_optimisation(target)
optimiser.constant_args.add(name)
return target
return annotation | fdd132d3beea900b81bfe645616b0c20933e22e3 | 3,659,083 |
def place_connection(body): # noqa: E501
"""Place an connection request from the SDX-Controller
# noqa: E501
:param body: order placed for creating a connection
:type body: dict | bytes
:rtype: Connection
"""
if connexion.request.is_json:
body = Connection.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | c69520f428f257a9eb3df97e060ad2e5cde45c94 | 3,659,084 |
def tag_list(request):
"""展示所有标签"""
return render(request, 'admin/tags_list.html') | 44b33197b3f8cb3467c8e7d32d4538f4dc2833b1 | 3,659,085 |
import requests
def scan_url(urls):
"""
Scan the url using the API
Args:
urls:
the list of urls
Returns:
A tuple of a bool indicating if all the urls are safe and a list indicating
the safeness of individual urls
"""
is_safe = True
safe_list = [True] * len(urls)
safe_browsing_url = "https://safebrowsing.googleapis.com/v4/threatMatches:find"
params = {"key": GOOGLE_TOKEN}
json = {
"threatInfo": {
"threatTypes": [
"THREAT_TYPE_UNSPECIFIED",
"MALWARE",
"SOCIAL_ENGINEERING",
"UNWANTED_SOFTWARE",
"POTENTIALLY_HARMFUL_APPLICATION",
],
"platformTypes": ["ANY_PLATFORM"],
"threatEntryTypes": ["URL"],
"threatEntries": [{"url": url} for url in urls],
}
}
r = requests.post(safe_browsing_url, params=params, json=json)
if r.status_code == 200:
results = r.json()
if "matches" in results and results["matches"]:
is_safe = False
matches = results["matches"]
urls_dict = {k: v for v, k in enumerate(urls)}
for match in matches:
safe_list[urls_dict[match["threat"]["url"]]] = False
return is_safe, safe_list | 9d3217a4c69a6a521d372b5692f93387ae7d61ad | 3,659,086 |
def head_to_tree(head, len_, prune, subj_pos, obj_pos):
"""
Convert a sequence of head indexes into a tree object.
"""
head = head[:len_].tolist()
root = None
if prune < 0:
nodes = [Tree() for _ in head]
for i in range(len(nodes)):
h = head[i]
nodes[i].idx = i
nodes[i].dist = -1 # just a filler
if h == 0:
root = nodes[i]
else:
nodes[h-1].add_child(nodes[i])
else:
# find dependency path
subj_pos = [i for i in range(len_) if subj_pos[i] == 0]
obj_pos = [i for i in range(len_) if obj_pos[i] == 0]
cas = None
subj_ancestors = set(subj_pos)
for s in subj_pos:
h = head[s]
tmp = [s]
while h > 0:
tmp += [h-1]
subj_ancestors.add(h-1)
h = head[h-1]
if cas is None:
cas = set(tmp)
else:
cas.intersection_update(tmp)
obj_ancestors = set(obj_pos)
for o in obj_pos:
h = head[o]
tmp = [o]
while h > 0:
tmp += [h-1]
obj_ancestors.add(h-1)
h = head[h-1]
cas.intersection_update(tmp)
# find lowest common ancestor
if len(cas) == 1:
lca = list(cas)[0]
else:
child_count = {k: 0 for k in cas}
for ca in cas:
if head[ca] > 0 and head[ca] - 1 in cas:
child_count[head[ca] - 1] += 1
# the LCA has no child in the CA set
for ca in cas:
if child_count[ca] == 0:
lca = ca
break
path_nodes = subj_ancestors.union(obj_ancestors).difference(cas)
path_nodes.add(lca)
# compute distance to path_nodes
dist = [-1 if i not in path_nodes else 0 for i in range(len_)]
for i in range(len_):
if dist[i] < 0:
stack = [i]
while stack[-1] >= 0 and stack[-1] not in path_nodes:
stack.append(head[stack[-1]] - 1)
if stack[-1] in path_nodes:
for d, j in enumerate(reversed(stack)):
dist[j] = d
else:
for j in stack:
if j >= 0 and dist[j] < 0:
dist[j] = int(1e4) # aka infinity
highest_node = lca
nodes = [Tree() if dist[i] <= prune else None for i in range(len_)]
for i in range(len(nodes)):
if nodes[i] is None:
continue
h = head[i]
nodes[i].idx = i
nodes[i].dist = dist[i]
if h > 0 and i != highest_node:
assert nodes[h-1] is not None
nodes[h-1].add_child(nodes[i])
root = nodes[highest_node]
assert root is not None
return root | 0459e6ebb0c64a8170970d44e5c6bcde5bb6221c | 3,659,087 |
def capped_subtraction(x, y):
"""Saturated arithmetics. Returns x - y truncated to the int64_t range."""
assert_is_int64(x)
assert_is_int64(y)
if y == 0:
return x
if x == y:
if x == INT_MAX or x == INT_MIN:
raise OverflowError(
'Integer NaN: subtracting INT_MAX or INT_MIN to itself')
return 0
if x == INT_MAX or x == INT_MIN:
return x
if y == INT_MAX:
return INT_MIN
if y == INT_MIN:
return INT_MAX
return to_capped_int64(x - y) | c4a171497ff351c22df3fc831a1e840366a90c5b | 3,659,088 |
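A short illustration of the saturating behaviour (INT_MAX and INT_MIN are the module-level int64 bounds the function relies on):

    print(capped_subtraction(5, 3))              # 2: ordinary subtraction
    print(capped_subtraction(INT_MIN, INT_MAX))  # INT_MIN: the result saturates instead of overflowing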
import numpy as np
def evaluate_points(func, begin, total_samps, var_list, attr):
"""
Inputs: func- the lambda function used to generate the data from the
evaluation vector
begin- the index to start at in the `attr` array
total_samps- the total number of samples to generate
var_list- list of the variables
attr- the attribute that holds the values to be used in the
evaluation vector
Identical to evaluate_points_verbose, but doesn't check for a verbose
option every iteration. This version also deals with indexing only part of
eval_vect.
"""
var_count = len(var_list)
term_count = func(np.zeros(var_count)).shape
if len(term_count) > 0:
term_count = term_count[1] # len(func(np.zeros(var_count)))
else:
term_count = 1
eval_vect = np.zeros([total_samps, var_count])
matrix = np.zeros([total_samps, term_count])
end = begin + total_samps
for j in range(var_count):
attr_arr = getattr(var_list[j], attr)
eval_vect[:, j] = attr_arr[begin:end].T
for i in range(total_samps):
matrix[i, :] = func(eval_vect[i, :])
return matrix | 68ba7eb95e8d26becbef58f14e3073f7ed184a5b | 3,659,089 |
import os
def get_username():
"""Return username
Return a useful username even if we are running under HT-Condor.
Returns
-------
str : username
"""
batch_system = os.environ.get('BATCH_SYSTEM')
if batch_system == 'HTCondor':
return os.environ.get('USER', '*Unknown user*')
return os.getlogin() | 1e199e7e38ed6532253ee1f7f229bb1baffc5538 | 3,659,090 |
def corresponding_chromaticities_prediction_CIE1994(experiment=1):
"""
Returns the corresponding chromaticities prediction for *CIE 1994*
chromatic adaptation model.
Parameters
----------
experiment : integer or CorrespondingColourDataset, optional
{1, 2, 3, 4, 6, 8, 9, 11, 12}
*Breneman (1987)* experiment number or
        :class:`colour.CorrespondingColourDataset` class instance.
    Returns
    -------
tuple
Corresponding chromaticities prediction.
References
----------
:cite:`Breneman1987b`, :cite:`CIETC1-321994b`
Examples
--------
>>> from pprint import pprint
>>> pr = corresponding_chromaticities_prediction_CIE1994(2)
>>> pr = [(p.uv_m, p.uv_p) for p in pr]
>>> pprint(pr) # doctest: +ELLIPSIS
[(array([ 0.207, 0.486]), array([ 0.2273130..., 0.5267609...])),
(array([ 0.449, 0.511]), array([ 0.4612181..., 0.5191849...])),
(array([ 0.263, 0.505]), array([ 0.2872404..., 0.5306938...])),
(array([ 0.322, 0.545]), array([ 0.3489822..., 0.5454398...])),
(array([ 0.316, 0.537]), array([ 0.3371612..., 0.5421567...])),
(array([ 0.265, 0.553]), array([ 0.2889416..., 0.5534074...])),
(array([ 0.221, 0.538]), array([ 0.2412195..., 0.5464301...])),
(array([ 0.135, 0.532]), array([ 0.1530344..., 0.5488239...])),
(array([ 0.145, 0.472]), array([ 0.1568709..., 0.5258835...])),
(array([ 0.163, 0.331]), array([ 0.1499762..., 0.4401747...])),
(array([ 0.176, 0.431]), array([ 0.1876711..., 0.5039627...])),
(array([ 0.244, 0.349]), array([ 0.2560012..., 0.4546263...]))]
"""
experiment_results = (convert_experiment_results_Breneman1987(experiment)
if is_numeric(experiment) else experiment)
with domain_range_scale(1):
XYZ_t, XYZ_r = experiment_results.XYZ_t, experiment_results.XYZ_r
xy_o1, xy_o2 = XYZ_to_xy([XYZ_t, XYZ_r])
uv_t = Luv_to_uv(XYZ_to_Luv(experiment_results.XYZ_ct, xy_o1), xy_o1)
uv_m = Luv_to_uv(XYZ_to_Luv(experiment_results.XYZ_cr, xy_o2), xy_o2)
Y_r = experiment_results.B_r
E_o1, E_o2 = experiment_results.Y_t, experiment_results.Y_r
XYZ_1 = experiment_results.XYZ_ct
XYZ_2 = chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_r, E_o1,
E_o2)
uv_p = Luv_to_uv(XYZ_to_Luv(XYZ_2, xy_o2), xy_o2)
return tuple([
CorrespondingChromaticitiesPrediction(experiment_results.name,
uv_t[i], uv_m[i], uv_p[i])
for i in range(len(uv_t))
]) | f138ec844b2712d0d09be630f912f156aa50acbd | 3,659,091 |
import numpy as np
def interpExtrap(x, xp, yp):
"""numpy.interp interpolation function extended by linear extrapolation."""
y = np.interp(x, xp, yp)
y = np.where(x < xp[0], yp[0]+(x-xp[0])*(yp[0]-yp[1])/(xp[0]-xp[1]), y)
return np.where(x > xp[-1], yp[-1]+(x-xp[-1])*(yp[-1]-yp[-2]) /
(xp[-1]-xp[-2]), y) | 8a0acc55e146a29171ef6648897cd5eba7e23c12 | 3,659,092 |
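A small sketch of the linear extrapolation beyond the sampled range (values are illustrative):

    xp = np.array([0.0, 1.0, 2.0])
    yp = np.array([0.0, 2.0, 4.0])                            # y = 2x on the sampled range
    print(interpExtrap(np.array([-1.0, 0.5, 3.0]), xp, yp))   # -> [-2.  1.  6.]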
import logging
import subprocess
def get_operating():
"""Get latest operating budgets from shared drive."""
logging.info('Retrieving latest operating budget')
command = "smbclient //ad.sannet.gov/dfs " \
+ "--user={adname}%{adpass} -W ad -c " \
+ "'prompt OFF;"\
+ " cd \"FMGT-Shared/Shared/BUDGET/" \
+ "Open Data/Open Data Portal/" \
+ "Shared with Performance and Analytics/" \
+ "Budget/Operating/\";" \
+ " lcd \"/data/temp/\";" \
+ " mget FY*BUDGET.xlsx;'"
command = command.format(adname=conf['alb_sannet_user'],
adpass=conf['alb_sannet_pass'],
temp_dir=conf['temp_data_dir'])
logging.info(command)
try:
p = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
return p
except subprocess.CalledProcessError as e:
return e.output | 21add01cda622abd366dfb7f5d5af54f5b9de6b0 | 3,659,093 |
import json
def get_nome_socio(id):
"""pega o nome de um livro pelo id."""
if request.method == 'GET':
try:
socio = db.query_bd('select * from socio where id = "%s"' % id)
if socio:
print(socio)
socio = socio[0]
print(socio)
content = {
'nome': socio['nome'],
'status': socio['status']
}
return json.dumps(content)
except Exception as e:
print(e)
return render_template('404.html') | 8945ef5bbc46cd8b6c79903f1dd0d3d226860792 | 3,659,094 |
def processDeps(element: etree.Element, params: dict = {}) -> None:
"""Function to NAF deps layer to RDF
Args:
element: element containing the deps layer
params: dict of params to store results
Returns:
None
"""
output = params["out"]
for dep in element:
if dep.tag == "dep":
# depname = genDepName(params)
# output.write(" xl:type naf-base:dep ;\n")
rfunc = dep.attrib["rfunc"]
to_term = dep.attrib["to_term"]
from_term = dep.attrib["from_term"]
output.write(
"_:" + from_term + " " + "naf-rfunc:" + rfunc + " _:" + to_term + "\n"
)
# for key in dep.attrib.keys():
# if (key != "id"):
# if key == "rfunc":
# output.write(" naf-base:"+attrib2pred(key)+' naf-base:'+dep.attrib[key]+' ;\n')
# else:
# output.write(" naf-base:"+attrib2pred(key)+' _:'+dep.attrib[key]+' ;\n')
output.write(" .\n")
return None | a096e42a3a036048daf97079f9e5bb78f5f068d9 | 3,659,095 |
import numpy as np
from warnings import warn
def fit_solution_matrix(weights, design_matrix, cache=None, hash_decimal=10, fit_mat_key=None):
"""
Calculate the linear least squares solution matrix
from a design matrix, A and a weights matrix W
S = [A^T W A]^{-1} A^T W
Parameters
----------
weights: array-like
ndata x ndata matrix of data weights
design_matrx: array-like
ndata x n_fit_params matrix transforming fit_parameters to data
cache: optional dictionary
optional dictionary storing pre-computed fitting matrix.
hash_decimal: int optional
the number of decimals to use in hash for caching. default is 10
fit_mat_key: optional hashable variable
optional key. If none is used, hash fit matrix against design and
weighting matrix.
Returns
-----------
array-like
n_fit_params x n_fit_params matrix
S = [A^T W A]^{-1} A ^T W
"""
if cache is None:
cache = {}
ndata = weights.shape[0]
if not weights.shape[0] == weights.shape[1]:
raise ValueError("weights must be a square matrix")
if not design_matrix.shape[0] == ndata:
raise ValueError("weights matrix incompatible with design_matrix!")
if fit_mat_key is None:
opkey = ('fitting_matrix',) + tuple(np.round(weights.flatten(), hash_decimal))\
+tuple(np.round(design_matrix.flatten(), hash_decimal))
else:
opkey = fit_mat_key
if not opkey in cache:
#check condition number
cmat = np.conj(design_matrix.T) @ weights @ design_matrix
#should there be a conjugation!?!
if np.linalg.cond(cmat)>=1e9:
warn('Warning!!!!: Poorly conditioned matrix! Your linear inpainting IS WRONG!')
cache[opkey] = np.linalg.pinv(cmat) @ np.conj(design_matrix.T) @ weights
else:
try:
cache[opkey] = np.linalg.inv(cmat) @ np.conj(design_matrix.T) @ weights
except np.linalg.LinAlgError as error:
print(error)
cache[opkey] = None
return cache[opkey] | 3b237600ab2ae266cec1cdc3f1fc650cc02b82d8 | 3,659,096 |
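A tiny worked example of the weighted solution matrix S = [A^T W A]^{-1} A^T W computed by the function above (dimensions and weights are illustrative):

    A = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])   # 3 data points, 2 fit parameters
    W = np.diag([1.0, 1.0, 4.0])                          # weight the last point more heavily
    S = fit_solution_matrix(W, A)
    print(S.shape)                                        # (2, 3): S maps the data vector to the fit parameters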
def version_from(schema_path, document_path):
"""HACK A DID ACK derives non-default 1.1 version from path."""
LOG.debug("xml version derivation flat inspection schema_path=%s", schema_path)
if CRVF_PRE_OASIS_SEMANTIC_VERSION in str(schema_path):
return CRVF_PRE_OASIS_SEMANTIC_VERSION
if CRVF_DEFAULT_SEMANTIC_VERSION in str(schema_path):
return CRVF_DEFAULT_SEMANTIC_VERSION
LOG.debug("xml version derivation deep call document_path=%s", document_path)
return version_peek(document_path) | f63c34e5467c098d0461b25699fe2d7a406508d9 | 3,659,097 |
def nessus_vuln_check(request):
"""
Get the detailed vulnerability information.
:param request:
:return:
"""
if request.method == 'GET':
id_vul = request.GET['vuln_id']
else:
id_vul = ''
vul_dat = nessus_report_db.objects.filter(vul_id=id_vul)
return render(request, 'nessus_vuln_data.html', {'vul_dat': vul_dat}) | b1b9e2cce8b00f4a837f2712abd2dc4e2f5edb3d | 3,659,098 |
def wait_for_job_completion(namespace, timeout, error_msg):
"""
This is a WORKAROUND of particular ocsci design choices: I just wait
for one pod in the namespace, and then ask for the pod again to get
it's name (but it would be much better to just wait for the job to
finish instead, then ask for a name of the successful pod and use it
to get logs ...)
Returns:
str: name of Pod resource of the finished job
"""
ocp_pod = ocp.OCP(kind="Pod", namespace=namespace)
try:
ocp_pod.wait_for_resource(
resource_count=1,
condition=constants.STATUS_COMPLETED,
timeout=timeout,
sleep=30)
except TimeoutExpiredError as ex:
# report some high level error as well
logger.error(error_msg)
# TODO: log both describe and the output from the fio pods, as DEBUG
ex.message = error_msg
raise(ex)
    # identify pod of the completed job
pod_data = ocp_pod.get()
# explicit list of assumptions, if these assumptions are not met, the
# code won't work and it either means that something went terrible
# wrong or that the code needs to be changed
assert pod_data['kind'] == "List"
pod_dict = pod_data['items'][0]
assert pod_dict['kind'] == "Pod"
pod_name = pod_dict['metadata']['name']
logger.info(f"Identified pod name of the finished Job: {pod_name}")
return pod_name | 652834a4d28232895fc6da9c74f7e08cdbee4798 | 3,659,099 |