content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
from datetime import timedelta
def get_slurm_params(n,runtime=None,mem=None,n_jobs=None):
"""Get remaining parameters to submit SLURM jobs based on specified parameters and number of files to process.
Parameters
----------
n : int
Number of files to process.
runtime : str, None
        Time per run, string formatted 'hours:minutes:seconds'.
mem : str, None
Memory, string formatted for SLURM e.g. '1G', '500MB'.
n_jobs : int, None
Number of SLURM jobs to launch.
Returns
-------
str
Time per job.
str
Memory per job.
int
Number of jobs.
"""
    #TIME ~5s per subject (ADHD200 and fmri dev dataset)
    #MEM 1G overall (cleans up after each subject; peak usage around ~500MB)
    #Tested w/ MIST64 and MIST444
    if mem is None:
        mem = '1G'
    if runtime is None:
        if n_jobs is None:
if n < 1000:
n_per_job = 50
elif n < 10000:
n_per_job = 200
else:
n_per_job = 500
            n_jobs = int(n/n_per_job)
            if n_jobs == 0:
                n_jobs = 1
else:
n_per_job = int(n/n_jobs) #round down (add one later to calc for time)
if n_per_job == 0:
n_per_job = 1
sec = 2*n_per_job*5 #(seconds)
if sec < 300:
sec = 300
        runtime = str(timedelta(seconds=sec))
else:
if len(runtime.split(':')) == 3:
sec = int(runtime.split(':')[0])*3600 + int(runtime.split(':')[1])*60 + int(runtime.split(':')[2])
elif len(runtime.split(':')) == 2:
            sec = int(runtime.split(':')[0])*60 + int(runtime.split(':')[1])
        if n_jobs is None:
n_jobs = int((10*n)/sec)
if n_jobs == 0:
n_jobs = 1
return runtime,mem,n_jobs | f2bf08430fbde0dcc430fd3e01d6b5ca1bd64487 | 9,000 |
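A minimal usage sketch for the function above, assuming it is importable; the SBATCH directive names are standard SLURM and the value of n is illustrative:
# Illustrative only: turn the returned values into sbatch array directives.
runtime, mem, n_jobs = get_slurm_params(n=2500)
sbatch_header = "\n".join([
    "#!/bin/bash",
    "#SBATCH --time=%s" % runtime,          # e.g. '0:33:20'
    "#SBATCH --mem=%s" % mem,               # e.g. '1G'
    "#SBATCH --array=0-%d" % (n_jobs - 1),
])
print(sbatch_header)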
import time
import os
import win32com.client
import pandas as pd
from datetime import timedelta
from pandas import DataFrame
def get_db_comment_text(file_name) -> DataFrame:
    """
    Extract the text from a db_comment (Excel) file and return it as a DataFrame.
    :param file_name: input file name (str)
    :return: text extracted from the input file
    """
    # :return: DataFrame of nouns extracted from the text with a morphological analyzer
start_time = time.time()
print('\r\nget_db_comment_text: %s' % file_name)
excel_app = win32com.client.Dispatch('Excel.Application')
full_path_file_name = os.path.abspath(file_name)
excel_file = excel_app.Workbooks.Open(full_path_file_name, True)
# region Table comment
table_comment_sheet = excel_file.Worksheets(1)
last_row = table_comment_sheet.Range("A1").End(-4121).Row # -4121: xlDown
table_comment_range = 'A2:D%s' % (str(last_row))
print('table_comment_range : %s (%d rows)' % (table_comment_range, last_row - 1))
table_comments = table_comment_sheet.Range(table_comment_range).Value2
df_table = pd.DataFrame(list(table_comments),
columns=['DB', 'Schema', 'Table', 'Text'])
df_table['FileName'] = full_path_file_name
df_table['FileType'] = 'table'
df_table['Page'] = 0
    df_table = df_table[df_table.Text.notnull()]  # drop rows with no Text value
df_table['Source'] = df_table['DB'] + '.' + df_table['Schema'] + '.' + df_table['Table'] \
+ '(' + df_table['Text'].astype(str) + ')'
# print(df_table)
# endregion
# region Column comment
column_comment_sheet = excel_file.Worksheets(2)
last_row = column_comment_sheet.Range("A1").End(-4121).Row # -4121: xlDown
column_comment_range = 'A2:E%s' % (str(last_row))
print('column_comment_range : %s (%d rows)' % (column_comment_range, last_row - 1))
column_comments = column_comment_sheet.Range(column_comment_range).Value2
df_column = pd.DataFrame(list(column_comments),
columns=['DB', 'Schema', 'Table', 'Column', 'Text'])
df_column['FileName'] = full_path_file_name
df_column['FileType'] = 'column'
df_column['Page'] = 0
    df_column = df_column[df_column.Text.notnull()]  # drop rows with no Text value
df_column['Source'] = df_column['DB'] + '.' + df_column['Schema'] + '.' + df_column['Table'] \
+ '.' + df_column['Column'] + '(' + df_column['Text'].astype(str) + ')'
# print(df_column)
# endregion
excel_file.Close()
    df_text = pd.concat([df_column, df_table], ignore_index=True)
# print(df_text)
end_time = time.time()
# elapsed_time = end_time - start_time
    elapsed_time = str(timedelta(seconds=end_time - start_time))
print('[pid:%d] get_db_comment_text elapsed time: %s' % (os.getpid(), elapsed_time))
print('text count: %s' % str(df_text.shape[0]))
# return get_word_list(df_text)
return df_text | ec36b3ac6a25e3fb5052f32fc11efe306db12a0e | 9,001 |
import openpyxl
def source_open() -> bool:
"""Open a source MS Excel spreadsheet file.
Returns
-------
boolean
Flag about successful processing.
"""
try:
Source.wbook = openpyxl.load_workbook(cmdline.workbook)
except Exception:
logger.error(
'Cannot open the MS Excel workbook %s',
cmdline.workbook
)
return False
return True | 19a2c214131afa6c1126bc1e0a4b4892a13bc32b | 9,002 |
from pathlib import Path
import logging
import yaml
import os
import requests
import json
logger = logging.getLogger(__name__)
def make_prompt(token: str, config: Path, model: str = ''):
"""Make a summary using the Studio21 API
Args:
token (str): Your api token to use.
config (Path): The path to the config file.
model (str, optional): Which model to use. If empty
then read the model from the config file. Defaults to ''.
Returns:
bool: Whether or not to continue calling the api.
"""
header = {'Authorization': f'Bearer {token}'}
with open(config) as f:
cfg = yaml.safe_load(f)
if not model:
model = cfg['model']
logger.debug(f'Using model {model} for generation.')
url = f'https://api.ai21.com/studio/v1/j1-{model}/complete'
cfg_name = os.path.basename(config)
prompt, extra, output_dir = generate_summary_prompt('studio21', config=cfg_name)
    # If the prompt is over 1800 tokens we will most likely get
    # an API error. The model can only take 2048 tokens.
prompt_tokens = len(prompt.split())
if prompt_tokens > 1800:
logger.warning(f'Our prompt was too long. Had {prompt_tokens} tokens.')
return True
else:
logger.debug(f'Our prompt had {prompt_tokens} tokens.')
data = {'prompt': prompt, **cfg['apiParams']}
result = requests.post(url, headers=header, json=data)
if result.status_code >= 400:
logger.critical(f'API request error!!! {result.status_code}: {result.text} {result.reason}')
# A 429 status code means we have reached our quota. So we return false.
# Any other code we ignore and continue.
return result.status_code != 429
else:
text = result.json()['completions'][0]['data']['text']
json.dump(result.json(), open(output_dir+'/output.json', 'w'), indent=4)
with open(f'{output_dir}/{cfg["summaryType"]}.txt', 'w') as f:
f.write(text+'\n'+extra)
return True | 865dde67278c21c1dee075c5a831281d59a311c8 | 9,003 |
import re
def get_license_match_error(lic, lic_file_path):
"""Returns an Error of the type 'warning' if the FreeRTOS license is present in the
input file. Otherwise an empty list is returned.
"""
# Get the words in the license template
with open('license.templ', 'r') as file:
template_lic = file.read()
template_lic_words = list(filter(None, re.split('[^0-9a-zA-Z]+', template_lic)))
# Split on non-alphanumeric characters
# re.split() will match the empty string.
lic_words = list(filter(None, re.split('[^0-9a-zA-Z]+', lic)))
i = 0
same = False
for i, word in enumerate(lic_words):
if word == template_lic_words[0]:
# Element wise comparison of the two arrays.
if lic_words[i:i+len(template_lic_words)] == template_lic_words:
same = True
break
if same:
return [Error(type='warning', info='FreeRTOS license is in file: ' + lic_file_path)]
return [] | d3f53f3d25c4d56b41fb561cf37b845d1efdc9fe | 9,004 |
import queue
def start_workers(size, delete=False, migrate=False):
"""Starts FluxxWorkers.
:returns: Pair of queues.
"""
streams = (queue.Queue(), queue.Queue(maxsize=size))
for _ in range(THREAD_COUNT):
worker = FluxxWorker(streams, delete, migrate)
worker.daemon = True
worker.start()
return streams | 358d8d3bc0d12edbe9e422cdfc206de626fd2a7d | 9,005 |
import numpy as np
import pandas as pd
def harmonizationApply(data, covars, model):
"""
Applies harmonization model with neuroCombat functions to new data.
Arguments
---------
data : a numpy array
data to harmonize with ComBat, dimensions are N_samples x N_features
covars : a pandas DataFrame
contains covariates to control for during harmonization
all covariates must be encoded numerically (no categorical variables)
must contain a single column "SITE" with site labels for ComBat
dimensions are N_samples x (N_covariates + 1)
model : a dictionary of model parameters
the output of a call to harmonizationLearn()
Returns
-------
bayes_data : a numpy array
harmonized data, dimensions are N_samples x N_features
"""
# transpose data as per ComBat convention
data = data.T
# prep covariate data
batch_col = covars.columns.get_loc('SITE')
cat_cols = []
num_cols = [covars.columns.get_loc(c) for c in covars.columns if c!='SITE']
covars = np.array(covars, dtype='object')
# load the smoothing model
smooth_model = model['smooth_model']
smooth_cols = smooth_model['smooth_cols']
### additional setup code from neuroCombat implementation:
# convert batch col to integer
covars[:,batch_col] = np.unique(covars[:,batch_col],return_inverse=True)[-1]
# create dictionary that stores batch info
(batch_levels, sample_per_batch) = np.unique(covars[:,batch_col],return_counts=True)
info_dict = {
'batch_levels': batch_levels.astype('int'),
'n_batch': len(batch_levels),
'n_sample': int(covars.shape[0]),
'sample_per_batch': sample_per_batch.astype('int'),
'batch_info': [list(np.where(covars[:,batch_col]==idx)[0]) for idx in batch_levels]
}
###
# check sites are identical in training dataset
check_sites = info_dict['n_batch']==model['info_dict']['n_batch']
if not check_sites:
raise ValueError('Number of sites in holdout data not identical to training data.')
# apply ComBat without re-learning model parameters
design = make_design_matrix(covars, batch_col, cat_cols, num_cols)
### additional setup if smoothing is performed
if smooth_model['perform_smoothing']:
# create cubic spline basis for smooth terms
X_spline = covars[:, smooth_cols].astype(float)
bs_basis = smooth_model['bsplines_constructor'].transform(X_spline)
# construct formula and dataframe required for gam
formula = 'y ~ '
df_gam = {}
for b in batch_levels:
formula = formula + 'x' + str(b) + ' + '
df_gam['x' + str(b)] = design[:, b]
for c in num_cols:
if c not in smooth_cols:
formula = formula + 'c' + str(c) + ' + '
df_gam['c' + str(c)] = covars[:, c].astype(float)
formula = formula[:-2] + '- 1'
df_gam = pd.DataFrame(df_gam)
# check formulas are identical in training dataset
check_formula = formula==smooth_model['formula']
if not check_formula:
raise ValueError('GAM formula for holdout data not identical to training data.')
# for matrix operations, a modified design matrix is required
design = np.concatenate((df_gam, bs_basis), axis=1)
###
s_data, stand_mean, var_pooled = ApplyStandardizationAcrossFeatures(data, design, info_dict, model)
bayes_data = adjust_data_final(s_data, design, model['gamma_star'], model['delta_star'],
stand_mean, var_pooled, info_dict)
# transpose data to return to original shape
bayes_data = bayes_data.T
return bayes_data | 7789d3a75d043df5048a7b0adced771c7e1ddd81 | 9,006 |
import re
def from_rkm(code):
"""Convert an RKM code string to a string with a decimal point.
Parameters
----------
code : str
RKM code string.
Returns
-------
str
String with a decimal point and an R value.
Examples
--------
>>> from pyaedt.circuit import from_rkm
>>> from_rkm('R47')
'0.47'
>>> from_rkm('4R7')
'4.7'
>>> from_rkm('470R')
'470'
>>> from_rkm('4K7')
'4.7k'
>>> from_rkm('47K')
'47k'
>>> from_rkm('47K3')
'47.3k'
>>> from_rkm('470K')
'470k'
>>> from_rkm('4M7')
'4.7M'
"""
# Matches RKM codes that start with a digit.
# fd_pattern = r'([0-9]+)([LREkKMGTFmuµUnNpP]+)([0-9]*)'
fd_pattern = r'([0-9]+)([{}]+)([0-9]*)'.format(''.join(RKM_MAPS.keys()), )
# matches rkm codes that end with a digit
# ld_pattern = r'([0-9]*)([LREkKMGTFmuµUnNpP]+)([0-9]+)'
ld_pattern = r'([0-9]*)([{}]+)([0-9]+)'.format(''.join(RKM_MAPS.keys()))
fd_regex = re.compile(fd_pattern, re.I)
ld_regex = re.compile(ld_pattern, re.I)
for regex in [fd_regex, ld_regex]:
m = regex.match(code)
if m:
fd, base, ld = m.groups()
ps = RKM_MAPS[base]
if ld:
return_str = ''.join([fd, '.', ld, ps])
else:
return_str = ''.join([fd, ps])
return return_str
return code | 8cb41a58fab685e5e7de4af533fade1aeee09c2c | 9,007 |
def get_arguments(method, rpc_version):
"""
Get arguments for method in specified Transmission RPC version.
"""
if method in ('torrent-add', 'torrent-get', 'torrent-set'):
args = constants.TORRENT_ARGS[method[-3:]]
elif method in ('session-get', 'session-set'):
args = constants.SESSION_ARGS[method[-3:]]
else:
        raise ValueError('Method "%s" not supported' % (method))
accessible = []
    for argument, info in args.items():
valid_version = True
if rpc_version < info[1]:
valid_version = False
if info[2] and info[2] <= rpc_version:
valid_version = False
if valid_version:
accessible.append(argument)
return accessible | dcd8b3f0e5e93409518d7e9d72ffe954c3b99915 | 9,008 |
import functools
def compose_local_noises(*functions: NoiseModel) -> NoiseModel:
"""Helper to compose multiple NoiseModel.
Args:
*functions: a list of functions
Returns:
The mathematical composition of *functions. The last element is applied
first. If *functions is [f, g, h], it returns f∘g∘h.
"""
return functools.reduce(
lambda f, g: lambda x: f(g(x)), functions, lambda x: x
) | 4b6e90ff2def9a988d8aa66782d990971b8de586 | 9,009 |
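A quick sanity check of the composition order stated in the docstring, using plain lambdas in place of NoiseModel instances (this assumes the snippet's NoiseModel type is defined so the function can be imported):
# With [f, g, h] the result is f(g(h(x))): the last function runs first.
f = lambda x: x + 1
g = lambda x: x * 2
h = lambda x: x - 3
composed = compose_local_noises(f, g, h)
assert composed(10) == f(g(h(10))) == 15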
import copy
def sls_build(
repository, tag="latest", base="opensuse/python", mods=None, dryrun=False, **kwargs
):
"""
.. versionchanged:: 2018.3.0
The repository and tag must now be passed separately using the
``repository`` and ``tag`` arguments, rather than together in the (now
deprecated) ``image`` argument.
Build a Docker image using the specified SLS modules on top of base image
.. versionadded:: 2016.11.0
The base image does not need to have Salt installed, but Python is required.
repository
Repository name for the image to be built
.. versionadded:: 2018.3.0
tag : latest
Tag name for the image to be built
.. versionadded:: 2018.3.0
name
.. deprecated:: 2018.3.0
Use both ``repository`` and ``tag`` instead
base : opensuse/python
Name or ID of the base image
mods
A string containing comma-separated list of SLS with defined states to
apply to the base image.
saltenv : base
Specify the environment from which to retrieve the SLS indicated by the
`mods` parameter.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
.. versionadded:: 2018.3.0
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
.. versionadded:: 2018.3.0
dryrun: False
when set to True the container will not be committed at the end of
the build. The dryrun succeed also when the state contains errors.
**RETURN DATA**
A dictionary with the ID of the new container. In case of a dryrun,
the state result is returned and the container gets removed.
CLI Example:
.. code-block:: bash
salt myminion docker.sls_build imgname base=mybase mods=rails,web
"""
create_kwargs = __utils__["args.clean_kwargs"](**copy.deepcopy(kwargs))
for key in ("image", "name", "cmd", "interactive", "tty", "extra_filerefs"):
try:
del create_kwargs[key]
except KeyError:
pass
# start a new container
ret = create(
image=base, cmd="sleep infinity", interactive=True, tty=True, **create_kwargs
)
id_ = ret["Id"]
try:
start_(id_)
# Now execute the state into the container
ret = sls(id_, mods, **kwargs)
# fail if the state was not successful
if not dryrun and not __utils__["state.check_result"](ret):
raise CommandExecutionError(ret)
if dryrun is False:
ret = commit(id_, repository, tag=tag)
finally:
stop(id_)
rm_(id_)
return ret | d3d047334ea8b02e61d26b3fc471eb2cedd7a8c5 | 9,010 |
import re
from datetime import datetime
def parse_date(date):
"""
Parses a date string and returns number of seconds from the EPOCH.
"""
# yyyy-mm-dd [hh:mm:ss[.s][ [+-]hh[:][mm]]]
p = re.compile( r'''(?P<year>\d{1,4}) # yyyy
- #
(?P<month>\d{1,2}) # mm or m
- #
(?P<day>\d{1,2}) # dd or d
#
(?: # [optional time and timezone]
(?:\s|T) #
(?P<hour>\d{1,2}) # hh or h
:? #
(?P<min>\d{1,2})? # mm or m
(?: # [optional seconds]
: #
(?P<sec>\d{1,2}) # ss or s
#
(?: # [optional decisecond]
\. # .
(?P<dsec>\d) # s
)? #
)? #
(?: # [optional timezone]
\s? #
((?: #
(?P<ho>[+-]? # [+ or -]
\d{1,2}) # hh or h
:? # [:]
(?P<mo>\d{2})? # [mm]
) #
| # or
(?:UTC)|(?:Z)) # UTC | Z
)? #
)? #
$ # EOL
''', re.VERBOSE)
m = p.match(date)
if m:
c = m.groupdict(0)
for k, v in c.items():
c[k] = int(v)
# get timezone offset in seconds
tz_offset = c['ho']*HOUR + c['mo']*MINUTE
# Some datasets use the date "0000-01-01 00:00:00" as an origin, even though
# the year zero does not exist in the Gregorian/Julian calendars.
if c['year'] == 0:
c['year'] = 1
year_offset = LEAP_YEAR
else:
year_offset = 0
origin = datetime(c['year'], c['month'], c['day'], c['hour'], c['min'], c['sec'], c['dsec'] * 100000)
dt = origin - EPOCH
return dt.days*DAY + dt.seconds + dt.microseconds*MICROSECOND - year_offset - tz_offset
raise ParserError('Invalid date: %s' % date) | 44dbf7c9ded2004118b64827e5c5016dc3967ec6 | 9,011 |
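parse_date depends on module-level constants (HOUR, MINUTE, DAY, MICROSECOND, LEAP_YEAR, EPOCH) and a ParserError exception that are not shown in this snippet; a plausible sketch of those definitions, stated as an assumption, is:
# Hypothetical module-level names assumed by parse_date above.
from datetime import datetime

MICROSECOND = 1e-6            # seconds per microsecond
MINUTE = 60                   # seconds per minute
HOUR = 3600                   # seconds per hour
DAY = 86400                   # seconds per day
LEAP_YEAR = 366 * DAY         # seconds in a leap year, used for the year-0 shift
EPOCH = datetime(1970, 1, 1)

class ParserError(Exception):
    """Raised when a date string cannot be parsed."""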
def CorrectOrWrong(Input,word):
"""Check if Input is inside word"""
    return Input in word
import numpy as np
import pandas as pd
from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric
def get_fair_metrics(dataset, pred, pred_is_dataset=False):
"""
Measure fairness metrics.
Parameters:
dataset (pandas dataframe): Dataset
pred (array): Model predictions
pred_is_dataset, optional (bool): True if prediction is already part of the dataset, column name 'labels'.
Returns:
fair_metrics: Fairness metrics.
"""
if pred_is_dataset:
dataset_pred = pred
else:
dataset_pred = dataset.copy()
dataset_pred.labels = pred
cols = ['statistical_parity_difference', 'equal_opportunity_difference', 'average_abs_odds_difference', 'disparate_impact', 'theil_index']
obj_fairness = [[0,0,0,1,0]]
fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)
for attr in dataset_pred.protected_attribute_names:
idx = dataset_pred.protected_attribute_names.index(attr)
privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}]
unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}]
classified_metric = ClassificationMetric(dataset,
dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
metric_pred = BinaryLabelDatasetMetric(dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
acc = classified_metric.accuracy()
row = pd.DataFrame([[metric_pred.mean_difference(),
classified_metric.equal_opportunity_difference(),
classified_metric.average_abs_odds_difference(),
metric_pred.disparate_impact(),
classified_metric.theil_index()]],
columns = cols,
index = [attr]
)
        fair_metrics = pd.concat([fair_metrics, row])
fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)
return fair_metrics | 1cf4a8655bf569f5d8ddfa530f46c65fe8f2be3f | 9,013 |
from typing import Sequence
from typing import Dict
from typing import Union
from typing import Tuple
def make_params(
key_parts: Sequence[str],
variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
"""
    Map keys to variables: this maps\
    URL-pattern variables to\
    the related URL parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private:
"""
# The unwrapped variable parts are in reverse order.
# Instead of reversing those we reverse the key parts
# and avoid the O(n) space required for reversing the vars
return dict(zip(reversed(key_parts), _unwrap(variable_parts))) | 4da736f2057e06be1ceb51968d6c205cd28b7093 | 9,014 |
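The private _unwrap helper and the VariablePartsType alias are not part of this snippet; a plausible same-module sketch of that helper (an assumption, for illustration only), plus a worked example, might look like:
# Hypothetical _unwrap: walk the nested-tuple linked list, yielding
# the URL parts in reverse order.
def _unwrap(variable_parts):
    while variable_parts:
        variable_parts, part = variable_parts
        yield part

# ((((), 'blog'), '2024'), 'intro') unwraps to 'intro', '2024', 'blog'
parts = ((((), 'blog'), '2024'), 'intro')
params = make_params(key_parts=['section', 'year', 'slug'], variable_parts=parts)
assert params == {'slug': 'intro', 'year': '2024', 'section': 'blog'}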
def load_sentiments(file_name=DATA_PATH + "sentiments.csv"):
"""Read the sentiment file and return a dictionary containing the sentiment
score of each word, a value from -1 to +1.
"""
sentiments = {}
for line in open(file_name):
word, score = line.split(',')
sentiments[word] = float(score.strip())
return sentiments | a98ae77a051ea3b599ee2fd5036e1bd33c1f4d64 | 9,015 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import zhinst.utils
def run_example(
device_id: str,
server_host: str = "localhost",
server_port: int = 8004,
plot: bool = True,
scope_length: int = 8192,
historylength: int = 1,
):
"""run the example."""
apilevel_example = 6 # The API level supported by this example.
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
# This example can't run with HF2 Instruments or instruments without the DIG option.
(daq, device, props) = zhinst.utils.create_api_session(
device_id, apilevel_example, server_host=server_host, server_port=server_port
)
zhinst.utils.api_server_version_check(daq)
# Enable the API's log.
daq.setDebugLevel(3)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# Now configure the instrument for this experiment. The following channels
# and indices work on all device configurations. The values below may be
# changed if the instrument has multiple input/output channels and/or either
# the Multifrequency or Multidemodulator options installed.
# Signal output mixer amplitude [V].
amplitude = 0.500
out_channel = 0
# Get the value of the instrument's default Signal Output mixer channel.
out_mixer_channel = zhinst.utils.default_output_mixer_channel(props)
in_channel = 0
osc_index = 0
scope_in_channel = 0 # scope input channel
if props["devicetype"].startswith("UHF"):
frequency = 1.0e6
else:
frequency = 100e3
exp_setting = [
# The output signal.
["/%s/sigouts/%d/on" % (device, out_channel), 1],
["/%s/sigouts/%d/enables/%d" % (device, out_channel, out_mixer_channel), 1],
["/%s/sigouts/%d/range" % (device, out_channel), 1],
[
"/%s/sigouts/%d/amplitudes/%d" % (device, out_channel, out_mixer_channel),
amplitude,
],
["/%s/sigins/%d/imp50" % (device, in_channel), 1],
["/%s/sigins/%d/ac" % (device, in_channel), 0],
["/%s/sigins/%d/range" % (device, in_channel), 2 * amplitude],
["/%s/oscs/%d/freq" % (device, osc_index), frequency],
]
node_branches = daq.listNodes(f"/{device}/", 0)
if "DEMODS" in node_branches:
# NOTE we don't need any demodulator data for this example, but we need
# to configure the frequency of the output signal on out_mixer_c.
exp_setting.append(
["/%s/demods/%d/oscselect" % (device, out_mixer_channel), osc_index]
)
daq.set(exp_setting)
# Perform a global synchronisation between the device and the data server:
# Ensure that the signal input and output configuration has taken effect
# before calculating the signal input autorange.
daq.sync()
# Perform an automatic adjustment of the signal inputs range based on the
# measured input signal's amplitude measured over approximately 100 ms.
# This is important to obtain the best bit resolution on the signal inputs
# of the measured signal in the scope.
zhinst.utils.sigin_autorange(daq, device, in_channel)
# Configure the instrument's scope via the /dev..../scopes/n/ node tree branch.
# 'length' : the length of each scope record
daq.setInt("/%s/scopes/0/length" % device, scope_length)
# 'channel' : select the scope channel(s) to enable.
# Bit-encoded as following:
# 1 - enable scope channel 0
# 2 - enable scope channel 1
# 3 - enable both scope channels (requires DIG option)
# NOTE we are only interested in one scope channel: scope_in_channel and leave
# the other channel unconfigured
daq.setInt("/%s/scopes/0/channel" % device, 1 << in_channel)
# 'channels/0/bwlimit' : bandwidth limit the scope data. Enabling bandwidth
# limiting avoids antialiasing effects due to subsampling when the scope
# sample rate is less than the input channel's sample rate.
# Bool:
# 0 - do not bandwidth limit
# 1 - bandwidth limit
daq.setInt("/%s/scopes/0/channels/%d/bwlimit" % (device, scope_in_channel), 1)
# 'channels/0/inputselect' : the input channel for the scope:
# 0 - signal input 1
# 1 - signal input 2
# 2, 3 - trigger 1, 2 (front)
# 8-9 - auxiliary inputs 1-2
# The following inputs are additionally available with the DIG option:
# 10-11 - oscillator phase from demodulator 3-7
# 16-23 - demodulator 0-7 x value
# 32-39 - demodulator 0-7 y value
# 48-55 - demodulator 0-7 R value
# 64-71 - demodulator 0-7 Phi value
# 80-83 - pid 0-3 out value
# 96-97 - boxcar 0-1
# 112-113 - cartesian arithmetic unit 0-1
# 128-129 - polar arithmetic unit 0-1
# 144-147 - pid 0-3 shift value
daq.setInt(
"/%s/scopes/0/channels/%d/inputselect" % (device, scope_in_channel), in_channel
)
# 'time' : timescale of the wave, sets the sampling rate to 1.8GHz/2**time.
# 0 - sets the sampling rate to 1.8 GHz
# 1 - sets the sampling rate to 900 MHz
# ...
    # 16 - sets the sampling rate to 27.5 kHz
daq.setInt("/%s/scopes/0/time" % device, 0)
# 'single' : only get a single scope record.
# 0 - acquire continuous records
# 1 - acquire a single record
# Note: configured below in main loop.
# daq.setInt('/%s/scopes/0/single' % device, 1)
# Configure the scope's trigger to get aligned data
# 'trigenable' : enable the scope's trigger (boolean).
# 0 - acquire continuous records
# 1 - only acquire a record when a trigger arrives
daq.setInt("/%s/scopes/0/trigenable" % device, 1)
# Specify the trigger channel, we choose the same as the scope input
daq.setInt("/%s/scopes/0/trigchannel" % device, in_channel)
# Trigger on rising edge?
daq.setInt("/%s/scopes/0/trigrising" % device, 1)
# Trigger on falling edge?
daq.setInt("/%s/scopes/0/trigfalling" % device, 0)
# Set the trigger threshold level.
daq.setDouble("/%s/scopes/0/triglevel" % device, 0.00)
# Set hysteresis triggering threshold to avoid triggering on noise
# 'trighysteresis/mode' :
# 0 - absolute, use an absolute value ('scopes/0/trighysteresis/absolute')
    # 1 - relative, use a relative value ('scopes/0/trighysteresis/relative') of the trigchannel's
# input range
# (0.1=10%).
daq.setDouble("/%s/scopes/0/trighysteresis/mode" % device, 1)
daq.setDouble("/%s/scopes/0/trighysteresis/relative" % device, 0.05)
# Set the trigger hold-off mode of the scope. After recording a trigger event, this specifies
# when the scope should become re-armed and ready to trigger, 'trigholdoffmode':
# 0 - specify a hold-off time between triggers in seconds ('scopes/0/trigholdoff'),
# 1 - specify a number of trigger events before re-arming the scope ready to trigger
# ('scopes/0/trigholdcount').
daq.setInt("/%s/scopes/0/trigholdoffmode" % device, 0)
daq.setDouble("/%s/scopes/0/trigholdoff" % device, 50e-6)
# Set trigdelay to 0.: Start recording from when the trigger is activated.
daq.setDouble("/%s/scopes/0/trigdelay" % device, 0.0)
# The trigger reference position relative within the wave, a value of 0.5 corresponds to the
# center of the wave.
daq.setDouble("/%s/scopes/0/trigreference" % device, 0.25)
# Disable trigger gating.
daq.setInt("/%s/scopes/0/triggate/enable" % device, 0)
# Enable segmented data transfer from the device.
daq.setInt("/%s/scopes/0/segments/enable" % device, 1)
# The number of segments to transfer in one shot.
# NOTE: We will set 'segments/count' on a per-record basis below.
# daq.setInt("/%s/scopes/0/segments/count" % device, 10)
# Perform a global synchronisation between the device and the data server: Ensure that the
# settings have taken
# effect on the device before continuing. This also clears the API's data buffers to remove any
# old data.
daq.sync()
# Check the scope_length parameter that was set:
scope_length_set = daq.getInt("/%s/scopes/0/length" % device)
print(
f"Actual scope length set on the device: {scope_length_set} (requested {scope_length})"
)
# Initialize and configure the Scope Module.
scopeModule = daq.scopeModule()
# 'mode' : Scope data processing mode.
# 0 - Pass through scope segments assembled, returned unprocessed, non-interleaved.
# 1 - Moving average, scope recording assembled, scaling applied, averaged, if averaging is
# enabled.
# 2 - Not yet supported.
# 3 - As for mode 1, except an FFT is applied to every segment of the scope recording.
scopeModule.set("mode", 1)
# 'averager/weight' : Average the scope shots using an exponentially weighted moving average of
# the previous 'weight' shots.
scopeModule.set("averager/weight", 1)
# 'historylength' : The number of scope records to keep in the Scope Module's memory, when more
# records arrive in the Module from the device the oldest records are overwritten.
scopeModule.set("historylength", historylength)
# Subscribe to the scope's data in the module.
wave_nodepath = f"/{device}/scopes/0/wave"
scopeModule.subscribe(wave_nodepath)
# Loop over the desired number of measurements. For each measurement we will get a scope record
# consisting of of the specified number of segments.
#
data = {}
data[wave_nodepath] = []
num_measurements = 5
segment_counts = [1, 5, 10, 15, 20]
for index, amplitude in enumerate(np.linspace(0.2, 1.0, num_measurements)):
# Use different signal output amplitudes simply to distinguish between
# different segments in the plot.
daq.setDouble(
"/%s/sigouts/%d/amplitudes/%d" % (device, out_channel, out_mixer_channel),
amplitude,
)
daq.sync()
# Perform an automatic adjustment of the signal inputs range based on
# the measured input signal's amplitude measured over approximately 100
# ms. This is important to obtain the best bit resolution on the signal
# inputs of the measured signal in the scope.
zhinst.utils.sigin_autorange(daq, device, in_channel)
# Note: We should disable the scope whilst modifying settings.
daq.setInt(f"/{device}/scopes/0/enable", 0)
# Set the desired number of segments.
daq.setInt(f"/{device}/scopes/0/segments/count", segment_counts[index])
daq.sync() # Ensure the setting has taken effect on the device before continuing.
segment_count_set = daq.getInt(f"/{device}/scopes/0/segments/count")
print(
f"Segment count set on the device: {segment_count_set}\
(requested {segment_counts[index]})."
)
if historylength == 1:
# Set the scope to operate in 'single' mode: Once one scope record consisting of the
# specified number of segments (>= 1) has been recorded the scope will automatically
# stop. Note: The device node scopes/0/single will be set back to 0 by the device after
# recording one record.
daq.setInt("/%s/scopes/0/single" % device, 1)
scopeModule.set("clearhistory", 1)
scope_records = get_scope_records(device, daq, scopeModule, historylength)
# Check the dictionary returned by read contains the expected data. The data returned is a
# dictionary with keys corresponding to the recorded data's path in the node hierarchy.
        if wave_nodepath not in scope_records:
print(
f"[error]: The subscribed data `{wave_nodepath}` for measurement {index} \
({amplitude}) was not returned."
)
else:
num_records = len(scope_records[wave_nodepath])
dt = scope_records[wave_nodepath][0][0]["dt"]
totalsamples = scope_records[wave_nodepath][0][0]["totalsamples"]
segment_duration = dt * totalsamples / segment_counts[index]
print(f"Scope data contains {num_records} record(s).")
print(f"Duration of each segment: {segment_duration} s.")
check_scope_record_flags(scope_records[wave_nodepath])
data[wave_nodepath].append(scope_records[wave_nodepath])
print("")
if plot and data[wave_nodepath]:
_, axis = plt.subplots()
axis.grid(True)
clockbase = daq.getInt("/%s/clockbase" % device)
total_segments = sum(segment_counts)
colors = cm.rainbow(np.linspace(0, 1, total_segments))
segment_index = 0
for index, records in enumerate(data[wave_nodepath]):
# We only plot the first record for each measurement. To plot all records for each
# measurement additionally loop over `records'.
wave = records[0][0]["wave"][scope_in_channel]
# Reshape the array to recover the individual segments (this is only necessary in
# segmented mode).
segments = wave.reshape(segment_counts[index], scope_length)
# Create a time array relative to the trigger time.
dt = records[0][0]["dt"]
# The timestamp is the timestamp of the last sample in the scope segment.
timestamp = records[0][0]["timestamp"]
triggertimestamp = records[0][0]["triggertimestamp"]
t_segment = np.arange(-scope_length, 0) * dt + (
timestamp - triggertimestamp
) / float(clockbase)
for segment in segments:
axis.plot(1e3 * t_segment, segment, color=colors[segment_index])
segment_index += 1
axis.set_title(
f"{num_measurements} Scope Records (consisting of different segment counts)"
)
axis.set_ylabel("Amplitude [V]")
axis.set_xlabel("Time, relative to trigger [ms]")
axis.axvline(0.0, linewidth=2, linestyle="--", color="k", label="Trigger time")
axis.autoscale(enable=True, axis="x", tight=True)
plt.show()
return data | e7f46a532b90fc1f208ebc2ae37b2216e2bd7561 | 9,016 |
def get_draft_url(url):
"""
Return the given URL with a draft mode HMAC in its querystring.
"""
if verify_draft_url(url):
# Nothing to do. Already a valid draft URL.
return url
# Parse querystring and add draft mode HMAC.
url = urlparse.urlparse(url)
salt = get_random_string(5)
# QueryDict requires a bytestring as its first argument
query = QueryDict(force_bytes(url.query), mutable=True)
query['edit'] = '%s:%s' % (salt, get_draft_hmac(salt, url.path))
# Reconstruct URL.
parts = list(url)
parts[4] = query.urlencode(safe=':')
return urlparse.urlunparse(parts) | f8eaaa7daaba2b5bfe448b5386e88d9f738b0f5d | 9,017 |
def make_datum(source: str, img_id: str, sent_id: int, sent: str):
"""
Create a datum from the provided infos.
:param source: the dataset of the particular sentence.
:param img_id: id of the image
:param sent_id: id of the sentence (of the image)
:param sent: the sentence
:return: a dict of datum
"""
uid = make_uid(img_id, source, sent_id)
img_path = get_img_path(source, img_id)
return {
'uid': uid,
'img_id': img_id,
'img_path': img_path,
'sent': sent,
} | 4814093519aad09e0f81d6e0841d130e1b2e43a4 | 9,018 |
def list_for_consumer(req):
"""List allocations associated with a consumer."""
context = req.environ['placement.context']
context.can(policies.ALLOC_LIST)
consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# NOTE(cdent): There is no way for a 404 to be returned here,
# only an empty result. We do not have a way to validate a
# consumer id.
allocations = alloc_obj.get_all_by_consumer_id(context, consumer_id)
output = _serialize_allocations_for_consumer(
context, allocations, want_version)
last_modified = _last_modified_from_allocations(allocations, want_version)
allocations_json = jsonutils.dumps(output)
response = req.response
response.status = 200
response.body = encodeutils.to_utf8(allocations_json)
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response | 37575bb0d05491d8a2e0933134fa530bf7699b3b | 9,019 |
import os
import iris
def get_supermean(name, season, data_dir, obs_flag=None):
"""Calculated supermeans from retrieved data, which are pickled Iris cubes.
:param name: Cube name. Should be CF-standard name. If no CF-standard name
exists the STASH code in msi format (for example m01s30i403)
is used as name.
:param season: Supermean for a season (including annual).
['ann', 'djf', 'mam', 'jja', 'son']
:param data_dir: Directory containing cubes of model output data for
supermeans.
:returns: Supermeaned cube.
:rtype Cube:
The monthly and seasonal supermeans are periodic averages, for example
the seasonal supermean consists of the averaged season, where each
season is averaged over several years.
The annual supermean is a continuous mean over multiple years.
    Supermeans are only applied to full climate years (starting Dec 1st).
"""
name_constraint = iris.Constraint(name=name)
if not obs_flag:
cubes_path = os.path.join(data_dir, 'cubeList.nc')
else:
cubes_path = os.path.join(data_dir, obs_flag + '_cubeList.nc')
cubes = iris.load(cubes_path)
# use STASH if no standard name
for cube in cubes:
if cube.name() == 'unknown':
cube.rename(str(cube.attributes['STASH']))
cube = cubes.extract_strict(name_constraint)
if season in ['djf', 'mam', 'jja', 'son']:
supermeans_cube = periodic_mean(cube, period='season')
return supermeans_cube.extract(iris.Constraint(season=season))
elif season == 'ann':
return periodic_mean(cube)
else:
raise ValueError(
"Argument 'season' must be one of "
"['ann', 'djf', 'mam', 'jja', 'son']. "
"It is: " + str(season)) | 22872ceeb6754ba33f6755cae5d2c363bf30f559 | 9,020 |
def get_zcl_attribute_size(code):
"""
Determine the number of bytes a given ZCL attribute takes up.
Args:
code (int): The attribute size code included in the packet.
Returns:
int: size of the attribute data in bytes, or -1 for error/no size.
"""
opts = (0x00, 0,
0x08, 1,
0x09, 2,
0x0a, 3,
0x0b, 4,
0x0c, 5,
0x0d, 6,
0x0e, 7,
0x0f, 8,
0x10, 1,
0x18, 1,
0x19, 2,
0x1a, 3,
0x1b, 4,
0x1c, 5,
0x1d, 6,
0x1e, 7,
0x1f, 8,
0x20, 1,
0x21, 2,
0x22, 3,
0x23, 4,
0x24, 5,
0x25, 6,
0x26, 7,
0x27, 8,
0x28, 1,
0x29, 3,
0x2a, 3,
0x2b, 4,
0x2c, 5,
0x2d, 6,
0x2e, 7,
0x2f, 8,
0x30, 1,
0x31, 2,
0x38, 2,
0x38, 4,
0x39, 8,
0x41, -1,
0x42, -1,
0x43, -1,
0x44, -1,
0x48, -1,
0x4c, -1,
0x50, -1,
0x51, -1,
0xe0, 4,
0xe1, 4,
0xe2, 4,
0xe8, 2,
0xe9, 2,
0xea, 4,
0xf0, 8,
0xf1, 16,
0xff, 0)
for i in range(0, len(opts), 2):
if code == opts[i]: return opts[i + 1]
return -1 | 99782c86be2413410c6819a59eadf0daba326af2 | 9,021 |
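The interleaved (code, size, code, size, ...) tuple above is scanned linearly; building a dict from it once gives a constant-time, easier-to-audit lookup. Note the table lists 0x38 twice (sizes 2 and 4): the linear scan returns the first entry, while a dict would keep the last. A small sketch:
# Sketch: build a {code: size} table from an interleaved tuple like `opts`.
def build_size_table(interleaved):
    return dict(zip(interleaved[0::2], interleaved[1::2]))

# e.g. sizes = build_size_table(opts); sizes.get(0x21, -1) -> 2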
def get_mappings():
"""We process the mappings for two separate cases. (1) Variables that vary by year,
and (2) variables where there are multiple realizations each year.
"""
# Set up grid for survey years. Note that from 1996 we can only expect information every other
# year. We start with 1978 as information about 1978 employment histories is collected with
# the initial interview.
years = range(1978, 2013)
# time-constant variables
dct_full = dict()
dct_full.update(process_time_constant(years))
dct_full.update(process_school_enrollment_monthly())
dct_full.update(process_highest_degree_received())
dct_full.update(process_multiple_each_year())
dct_full.update(process_single_each_year())
# Finishing
return years, dct_full | a02ac60889ab2ef9524a50ec7eb03fe6a8b54917 | 9,022 |
def _get_function_name_and_args(str_to_split):
"""
Split a string of into a meta-function name and list of arguments.
@param IN str_to_split String to split
@return Function name and list of arguments, as a pair
"""
parts = [s.strip() for s in str_to_split.split(" | ")]
if len(parts) < 2:
raise Exception("Invalid meta function string: %s" % str_to_split)
func_name = parts[0]
func_args = parts[1:]
return func_name, func_args | 1dae51c87e727d7fa6a3a8012f9768b9ca3364e7 | 9,023 |
import os
import time
def runAndWatch(container, cgroup, watchCgroup, notify=None, wallClockLimit=None,
cpuClockLimit=None, pollInterval=1, notifyInterval=10):
"""
Run a container and watch it for time limits. Returns a dictionary with
container statistics.
"""
inspection = inspectContainer(container)
command = ["container", "start", "--runtime", RUNTIME, container]
if CGROUP_WORKAROUND:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
cgroup.addProcess(pid)
invokePodmanCommand(command)
os._exit(0)
else:
invokePodmanCommand(command)
timeout = False
ticks = 0
maxMemoryUsage = 0
while True:
time.sleep(pollInterval)
ticks += 1
if ticks % notifyInterval == 0 and notify is not None:
notify()
inspection = inspectContainer(container)
if containerStatus(inspection) != "running":
break
wTime = containerRunTime(inspection)
maxMemoryUsage = max(maxMemoryUsage, watchCgroup.currentMemoryUsage())
cTime = watchCgroup.cpuStats()["usage_usec"]
if wTime >= wallClockLimit * 1000000 or cTime >= cpuClockLimit * 1000000:
stopContainer(container, timeout=1)
timeout = True
inspection = inspectContainer(container)
stats = {
"cpuStat": watchCgroup.cpuStats(),
"memStat": watchCgroup.memoryStats(),
"maxMemory": maxMemoryUsage,
"wallTime": containerRunTime(inspection),
"exitCode": containerExitCode(inspection),
"outOfMemory": containerOomKilled(inspection),
"timeout": timeout,
"output": containerLogs(container)
}
return stats | 24179f4e2f554bedb0ee0b8507d777723cf220b1 | 9,024 |
import requests
from flask import request
def replicas_on_delete():
"""
This is a route for ALL NODES.
A (previous) neighbor node sends POST requests to this route,
so that a key-value pair replica is deleted in the current NODE.
"""
# The hash ID of the node-owner of the primary replica
start_id = request.form['id']
key = request.form['key']
k = int(request.form['k'])
if (key in node.storage):
# Delete the key-value replica from our database
del node.storage[key]
if (k == 1 or node.next_id == start_id):
return "Replicas have been deleted!", 200
data_to_next = {
'id': start_id,
'key': key,
'k': k-1
}
url_next = "http://" + node.next_ip + ":" + \
str(node.next_port) + "/delete/replicas"
print("Informing the next neighbor to delete their replica.")
r = requests.post(url_next, data_to_next)
if r.status_code != 200:
print("Something went wrong with deleting the replica \
in the next node.")
return r.text, r.status_code | ff8b4cc06ce7a640914bdd58ff897dc060f22d4b | 9,025 |
import os
def load(train_dir=train_dir, test_dir=test_dir):
"""
Load the dataset into memory.
This uses a cache-file which is reloaded if it already exists,
otherwise the dataset is created and saved to
the cache-file. The reason for using a cache-file is that it
ensure the files are ordered consistently each time the dataset
is loaded. This is important when the dataset is used in
combination with Transfer Learning.
:return:
A DataSet-object.
"""
# Path for the cache-file.
cache_path = os.path.abspath("signatures.pkl")
# If the DataSet-object already exists in a cache-file
# then load it, otherwise create a new object and save
# it to the cache-file so it can be loaded the next time.
dataset = load_cached(cache_path=cache_path,
train_dir=train_dir,
test_dir=test_dir)
return dataset | f19c9d2220cd68a7f2722b6fdc2170d70cff4367 | 9,026 |
import numpy as np
def pdf2(sigma_matrix, grid):
"""Calculate PDF of the bivariate Gaussian distribution.
Args:
sigma_matrix (ndarray): with the shape (2, 2)
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
        kernel (ndarray): un-normalized kernel.
"""
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
return kernel | 7477b33eab034d9ca5cac63fd1eedd4f6789f1ba | 9,027 |
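A usage sketch for pdf2, building the (K, K, 2) grid directly with numpy rather than the snippet's mesh_grid helper (not shown here); the kernel size and sigma values are illustrative:
# Illustrative: isotropic Gaussian kernel of size K.
import numpy as np

K = 21
ax = np.arange(K) - (K - 1) / 2.0
xx, yy = np.meshgrid(ax, ax)
grid = np.stack([xx, yy], axis=-1)                # shape (K, K, 2)
sigma_matrix = np.array([[3.0, 0.0], [0.0, 3.0]])
kernel = pdf2(sigma_matrix, grid)
kernel = kernel / kernel.sum()                    # normalize to sum to 1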
def spell_sql(*args,**kwargs):
"""
list=[]
"""
if len(args[0])<=0:
return None
sql="SELECT * from `emotion_data` WHERE id ={}".format(args[0][0])
for index in args[0][1:]:
sql +=" or id ={}".format(index)
return sql | 5e5b231be2dabca75abed332864c8ae3d93b750e | 9,028 |
import numpy as np
def is_within_bounds(bounds, point):
""" Returns true if point is within bounds. point is a d-array and bounds is a
dx2 array. bounds is expected to be an np.array object.
"""
point = np.array(point)
if point.shape != (bounds.shape[0],):
return False
above_lb = np.all((point - bounds[:, 0] >= 0))
below_ub = np.all((bounds[:, 1] - point >= 0))
return above_lb * below_ub | 926c107a808d98f62c0323746112b6f73b5f89fe | 9,029 |
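A short usage example for is_within_bounds with a 2-D box (illustrative values):
import numpy as np

bounds = np.array([[0.0, 1.0], [-5.0, 5.0]])    # [lower, upper] per dimension
assert is_within_bounds(bounds, [0.5, 0.0])
assert not is_within_bounds(bounds, [1.5, 0.0])
assert not is_within_bounds(bounds, [0.5])      # wrong dimensionality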
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight.mean(dim=-1)
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss | b19b937f9b774dcac09f8949c2d1762743e7958e | 9,030 |
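weight_reduce_loss calls a reduce_loss helper that is not part of this snippet; a minimal sketch of what it is assumed to do (mirroring the usual mmdetection-style helper) is:
# Hypothetical reduce_loss assumed by weight_reduce_loss above.
def reduce_loss(loss, reduction):
    """Reduce an element-wise loss tensor with 'none', 'mean' or 'sum'."""
    if reduction == 'none':
        return loss
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    raise ValueError(f'invalid reduction: {reduction}')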
def list_of_paths():
"""
    It lists all the folders which do not contain PET images.
"""
return ['.DS_Store', 'localizer', 'Space_3D_T2_FLAIR_sag_p2', 'AXIAL_FLAIR', 'MPRAGE_ADNI_confirmed_REPEATX2', 'Axial_PD-T2_TSE',
'Axial_PD-T2_TSE_repeat', 'MPRAGE_SAG_ISO_p2_ND', 'Axial_PD-T2_TSE_confirmed', 'MPRAGESAGISOp2ND', 'MPRAGE_ADNI_confirmed',
'MPRAGE_ADNI_confirmed_repeat', 'MPRAGE_SAG_ISO_p2', 'MPRAGE', 'MPRAGE_ADNI_confirmed_REPEAT', 'Axial_PD-T2_TSE_confirmed_repeat',
'MPRAGE_ADNI_conf_REPEAT', 'Space_3D_T2_FLAIR_sag_p2_REPEAT', 'MPRAGE_ADNI_confirmed_RPT', 'Brain_256_1.6_zoom_4_x_4_iter',
'Space_3D_T2_FLAIR_sag_REPEAT', 'Axial_PD-T2_TSE_RPTconfirmed', 'Axial_PD-T2_TSE_RPT_confirmed', 'Axial_PD-T2_TSE_confirmed_REPEAT',
'flair_t2_spc_irprep_ns_sag_p2_1mm_iso', 'localiser'] | bc74024d49396f80947b3cb0a45066381b7d3af4 | 9,031 |
def convert_onnx_to_ell(path, step_interval_msec=None, lag_threshold_msec=None):
"""
convert the importer model into a ELL model, optionally a steppable model if step_interval_msec
and lag_threshold_msec are provided.
"""
_logger = logger.get()
_logger.info("Pre-processing... ")
converter = convert.OnnxConverter()
importer_model = converter.load_model(path)
_logger.info("\n Done pre-processing.")
try:
importer_engine = common.importer.ImporterEngine(step_interval_msec=step_interval_msec,
lag_threshold_msec=lag_threshold_msec)
ell_map = importer_engine.convert_nodes(importer_model)
ordered_importer_nodes, node_mapping = importer_engine.get_importer_node_to_ell_mapping()
except Exception as e:
_logger.error("Error occurred while attempting to convert the model: " + str(e))
raise
return ell_map, ordered_importer_nodes | 28843c1b588d4c1772c5c4be10e1a535b940703d | 9,032 |
import numpy as np
def cdfRosconi(cdfThickness=np.linspace(0,1,1000),
alpha=1.71e11, beta=8.17, gamma=55.54):
"""
TODO: Not Yet Implemented
* Input to this function has units of mm for default parameters.
** Default values of alpha, beta and gamma derived from:
Rosconi et al. Quantitative approach to the stochastics of bone remodeling. 2012.
thickness - range of thicknesses (in mm) considered in the pdf calculation.
"""
# Thickness distribution (using default parameters, thickness is in mm)
def distributionRosconi(t, alpha=alpha, beta=beta, gamma=gamma):
return alpha*(t**beta)*np.exp(-gamma*t)
pdf = distributionRosconi(cdfThickness, alpha, beta, gamma)
pdf = pdf / np.sum(pdf)
cdf = np.cumsum(pdf)
return cdfThickness, cdf | 3f774c4be62c2b1b01430c7dce6aae4374693ae1 | 9,033 |
def compute_error_model(model_metadata, X_test, y_test, target, error_metric):
    """Computes the model error based on test data
    :param model_metadata: a dictionary containing metadata about a model
    :param X_test: a dataframe containing features specific to the model being evaluated
    :param y_test: a dataframe of target labels
    :param target: the column which contains the actual labels for training data
    :param error_metric: error metric to evaluate model performance on (MAE, RMSE, etc.)
:return: the computed error
"""
model_pipeline = get_prediction_pipeline(model_metadata)
pred_prices = model_pipeline.predict(X_test)
error = compute_error(y_test, pred_prices, error_metric)
return error | 9cb1ede604f863c1eeab12a593c8b62527599d12 | 9,034 |
def column(df, s, column) -> ReturnType:
"""Gets the series of the column named `column`
"""
return df.loc[s, column].to_numpy(), 0 | 8d400c2425a062566e61c23361dd6a1f6e0ba8b7 | 9,035 |
def features_to_id(features, intervals):
"""Convert list of features into index using spacings provided in intervals"""
id = 0
for k in range(len(intervals)):
id += features[k] * intervals[k]
# Allow 0 index to correspond to null molecule 1
id = id + 1
return id | 74b0b201888a69c045ef140959876dd3e909f20d | 9,036 |
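A worked example of the mixed-radix indexing above, where one natural choice of intervals is the cumulative product of the per-feature value counts (the counts are illustrative):
# counts (4, 3, 2) -> intervals (1, 4, 12); features (2, 1, 1) -> 2 + 4 + 12 + 1 = 19
counts = [4, 3, 2]
intervals = [1, counts[0], counts[0] * counts[1]]
assert features_to_id([2, 1, 1], intervals) == 19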
import torch
def index_initial(n_batch, n_ch, tensor=True):
"""Tensor batch and channel index initialization.
Args:
n_batch (Int): Number of batch.
n_ch (Int): Number of channel.
tensor (bool): Return tensor or numpy array
Returns:
Tensor: Batch index
Tensor: Channel index
"""
batch_index = []
for i in range(n_batch):
batch_index.append([[i]] * n_ch)
ch_index = []
for i in range(n_ch):
ch_index += [[i]]
ch_index = [ch_index] * n_batch
if tensor:
batch_index = torch.tensor(batch_index)
ch_index = torch.tensor(ch_index)
if torch.cuda.is_available():
batch_index = batch_index.cuda()
ch_index = ch_index.cuda()
return batch_index, ch_index | 52a16ad4afcf931ba4cda9c014d47050970995c5 | 9,037 |
from distutils.spawn import find_executable
import os
def which(binary_name, pathvar=None):
""" Deduces the path corresponding to an executable name,
as per the UNIX command `which`. Optionally takes an
override for the $PATH environment variable.
Always returns a string - an empty one for those
executables that cannot be found.
"""
if not hasattr(which, 'pathvar'):
which.pathvar = os.getenv("PATH", DEFAULT_PATH)
return find_executable(binary_name, pathvar or which.pathvar) or "" | 6a1c02ea939e119df72c4ad1b3e685614218574d | 9,038 |
import pandas as pd
def load_titanic(test_size=0.2, random_state=1, cache_dir=None, cache_subdir='datasets'):
""" load titanic database """
path = find_path(DatasetEnum.titanic, cache_dir=cache_dir, cache_subdir=cache_subdir)
df = pd.read_csv(path, sep=",", na_values=["?"], keep_default_na=True)
# Shuffle DF and compute train/test split
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
idx = int(len(df) * (1 - test_size))
df_train = df.loc[:idx]
df_test = df.loc[idx:]
# Filter columns and build X, y
y_train = df_train["survived"].values
del df_train["survived"]
y_test = df_test["survived"].values
del df_test["survived"]
infos = {}
return df_train, y_train, df_test, y_test, infos | a222a684a55bde482664b0b3072fb04047360f50 | 9,039 |
def mock_function_fail(*args, **kwargs):
"""
Mock a function that 'fails', i.e., returns a 1.
"""
print("\nmock> f({}) ==> 1".format(args)) # pragma: no cover
return 1 # pragma: no cover | ec2085e51a0809c9656d1831429858e14baf3f63 | 9,040 |
def get_field_result(client_id, field_id, count=1):
"""
    Input: field id, client (card) id.
    Output: the latest result for the field.
    :return: fetched rows
"""
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT directions_napravleniya.client_id, directions_issledovaniya.napravleniye_id,
directions_issledovaniya.research_id, directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s as time_confirmation,
to_char(directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s, 'DD.MM.YYYY') as date_confirm,
directions_paraclinicresult.value, directions_paraclinicresult.field_id
FROM directions_issledovaniya
LEFT JOIN directions_napravleniya
ON directions_issledovaniya.napravleniye_id=directions_napravleniya.id
LEFT JOIN directions_paraclinicresult
ON directions_issledovaniya.id=directions_paraclinicresult.issledovaniye_id
WHERE directions_napravleniya.client_id = %(client_p)s
and directions_paraclinicresult.field_id = %(field_id)s
and directions_issledovaniya.time_confirmation is not NULL
ORDER BY directions_issledovaniya.time_confirmation DESC LIMIT %(count_p)s
""",
params={'client_p': client_id, 'field_id': field_id, 'count_p': count, 'tz': TIME_ZONE},
)
row = cursor.fetchall()
return row | 7191705462f1fceb3dfca866c5fed96fa8019886 | 9,041 |
def parse_basic_profile_forms():
"""Parses and validates basic profile forms in the request.
Returns:
A dictionary containing user profile.
Raises:
ValueError: When validation failed.
"""
return {
'display_name': get_form_string('display_name', 32),
'contact_email': get_form_string('contact_email', 256),
'member_names': get_form_string('member_names', 4096),
'nationalities': get_form_string('nationalities', 1024, allow_empty=True),
'languages': get_form_string('languages', 1024, allow_empty=True),
'source_url': get_form_string('source_url', 2083, allow_empty=True),
} | c8409bcc7de6a2c0a320859f90d54215888febf8 | 9,042 |
def fixture_success(request):
"""
Test Cases:
1. Hitting uncovered route as base user (logged in flow). Will return 200
since uncovered route is an open endpoint and thus Anonymous users can also
access it.
2. Hitting uncovered route as base user and HEAD request
3. Hitting uncovered route as admin user and HEAD request
4. Hitting uncovered route as super admin user and GET request
5. Hitting uncovered route as super admin user and HEAD request
6. Hitting uncovered route as anonymous user and GET request
7. Hitting uncovered route as anonymous user and HEAD request
8. Hitting covered route as admin user and GET request
9. Hitting covered route as admin user and HEAD request
10. Hitting covered route as super admin user and POST request
11. Hitting covered route as super admin user and GET request
12. Hitting covered route as super admin user and HEAD request
"""
db.create_all()
base_user, admin_user, super_admin_user = config_data_setup()
data_to_send = [
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': base_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': base_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': super_admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': super_admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': None,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': None,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/covered_route',
'user': admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/covered_route',
'user': admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'POST',
'url_rule': '/covered_route',
'user': super_admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/covered_route',
'user': super_admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/covered_route',
'user': super_admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
}
]
request.addfinalizer(tear_down)
return app, data_to_send | 26603ce9203372e9ced217f75505b149942eee98 | 9,043 |
from typing import Optional
import csv
def get_quote_name(quote_number: int) -> Optional[str]:
""" used to help applications look up quote names based on the number
users.
"""
assert type(quote_number) in (int, type(None))
if quote_number is None:
return None
for key, value in csv.__dict__.items():
if value == quote_number:
return key
else:
raise ValueError('invalid quote_number: {}'.format(quote_number)) | 4a96ee42b37879469a67cb657d97aa321770fd83 | 9,044 |
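A short usage example with the stdlib csv constants:
import csv

print(get_quote_name(csv.QUOTE_NONNUMERIC))   # 'QUOTE_NONNUMERIC'
print(get_quote_name(None))                   # None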
def calc_floodzone(row):
"""Extracts the FEMAZONE of an SFHA based on each row's attributes.
This function acts on individual rows of a pandas DataFrame using
the apply built-in.
Parameters
----------
row : Pandas Series
A row of a pandas DataFrame
Returns
-------
str
The flood zone designation for an SFHA
"""
if row["FLD_ZONE"] == 'AO':
zone = 'AO' + str(round(row['DEPTH']))
elif row["FLD_ZONE"] == 'AH':
zone = 'AH' + str(round(row["STATIC_BFE"]))
else:
zone = row["FLD_ZONE"]
return zone | 5bb6f3f7cfc1b6bce41ad7a752845287759c16ad | 9,045 |
import numpy as np
def trans_you(ori_image, img_db, target_size=(8, 8)):
"""Transfer original image to composition of images.
Parameters
----------
ori_image : numpy.ndarray
the original image
img_db : h5py.File
image datasets
target_size : tuple
Returns
-------
res_img : numpy.ndarray
result image
"""
tot_pixels = ori_image.shape[0]*ori_image.shape[1]
image_idx = img_idx(tot_pixels)
res_img = np.zeros_like(ori_image)
res_img = imresize(res_img,
(res_img.shape[0]*target_size[0],
res_img.shape[1]*target_size[1]))
    for i in range(ori_image.shape[0]):
        for j in range(ori_image.shape[1]):
idx = image_idx[i*ori_image.shape[1]+j]
img = get_img(img_db, idx)
pixel = ori_image[i, j, :]
img = trans_img(img, pixel, target_size)
res_img[i*target_size[0]:(i+1)*target_size[0],
j*target_size[1]:(j+1)*target_size[1]] = img
print ("[MESSAGE] Row %i is processed." % (i+1))
return res_img | f9717d2ddc9052bee103010a23328f5445c4edc5 | 9,046 |
# A, T, DIV, TAG, URL, redirect, get_vars, s3, s3db, etc. are injected into the
# controller environment by web2py/Sahana Eden; they are not imported from a module here.
def new_assessment():
"""
RESTful CRUD controller to create a new 'complete' survey
- although the created form is a fully custom one
"""
# Load Model
table = s3db.survey_complete
s3db.table("survey_series")
def prep(r):
if r.interactive:
viewing = get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = get_vars.get("series", None)
if not series_id:
series_id = r.id
if series_id is None:
# The URL is bad, without a series id we're lost so list all series
redirect(URL(c="survey", f="series", args=[], vars={}))
if len(request.post_vars) > 0:
id = s3db.survey_save_answers_for_series(series_id,
None, # Insert
request.post_vars)
response.confirmation = \
s3.crud_strings["survey_complete"].msg_record_created
r.method = "create"
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Not sure why we need to repeat this & can't do it outside the prep/postp
viewing = get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = get_vars.get("series", None)
if not series_id:
series_id = r.id
if output["form"] is None:
# The user is not authorised to create so switch to read
redirect(URL(c="survey", f="series",
args=[series_id, "read"],
vars={}))
# This is a bespoke form which confuses CRUD, which displays an
# error "Invalid form (re-opened in another window?)"
            # So as long as we don't have an error in the form we can
            # delete this error.
elif response.error and not output["form"]["error"]:
response.error = None
s3db.survey_answerlist_dataTable_post(r)
form = s3db.survey_buildQuestionnaireFromSeries(series_id, None)
urlimport = URL(c=module, f="complete", args=["import"],
vars={"viewing":"%s.%s" % ("survey_series", series_id),
"single_pass":True}
)
buttons = DIV(A(T("Upload Completed Assessment Form"),
_href=urlimport,
_id="Excel-import",
_class="action-btn"
),
)
output["form"] = TAG[""](buttons, form)
return output
s3.postp = postp
return crud_controller(module, "complete",
method = "create",
rheader = s3db.survey_series_rheader
) | a4b1f9ba0a7e70349607f5cc70fdac72d75fb236 | 9,047 |
import types
import random
import asyncpg
async def random_pokemon(connection: asyncpg.Connection, /) -> types.Pokemon:
"""Returns a random :class:`types.Pokemon`."""
records = await tables.Pokemon.fetch(connection)
return await _pokemon(connection, random.choice(records)) | b60659f236a4cbea998a77df211da92c18e4f0b8 | 9,048 |
import re
def remove_space(text):
"""
    Function that collapses runs of whitespace into single spaces and strips the ends.
    :param str text: text to process
"""
return re.sub(r"\s+", " ", text).strip() | 729d26bb6acbaa8da4c945d2ea6646ebb90f3122 | 9,049 |
import base64
def getFilePathBase():
"""
    Get the directory path of the file served at the requested URL.
    :return: PHP payload encoded as base64
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
header("Content-Type:application/json");
$res = array();$res["path"] = dirname(__FILE__);
echo ("<ek>");
echo json_encode($res);
echo ("</ek>");
die();
"""
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8") | afcb1a5bf2972a2b13a32edcd8a9b968742bf7f3 | 9,050 |
def extractHeldSimple(q, factoryConfig=None):
"""All Held Glideins: JobStatus == 5
q: dictionary of Glideins from condor_q
    factoryConfig (FactoryConfig): Factory configuration (NOT USED, for interface)
    Returns:
        list: keys (job ids) of the Held Glideins from condor_q
"""
# Held==5
qheld = q.fetchStored(lambda el: el["JobStatus"] == 5)
qheld_list = list(qheld.keys())
return qheld_list | c942991bb0370b63364c1b8d5644713865d9ea82 | 9,051 |
import numpy as np
def neighbors(stats1, stats2, max_val=1e5):
"""stats from cv.connectedComponentsWithStats."""
pts1 = np.concatenate(
(stats1[:, :2], stats1[:, :2] + stats1[:, 2:4]), axis=0)
pts2 = np.concatenate(
(stats2[:, :2], stats2[:, :2] + stats2[:, 2:4]), axis=0)
dist = np.abs(pts1[:, None] - pts2).sum(axis=2)
eye = np.eye(dist.shape[0], dtype=dist.dtype)
R = (dist + eye * max_val).argmin(axis=1)
return R.reshape((2, -1)).T | 1b6aecad76f968cd83d40ee6531fcbd6b3b0df6c | 9,052 |
from collections import defaultdict
from typing import Optional
def shortest_substring_containing_characters(text: str, char_set: set) -> Optional[str]:
"""
O(n) & O(k)
"""
start = 0
end = -1
count_char = defaultdict(int) # char and its count
found_set = set()
for index, char in enumerate(text):
if char in char_set:
count_char[char] += 1
found_set.add(char)
if len(found_set) == len(char_set):
new_start = start
new_end = index
while text[new_start] not in char_set or count_char[text[new_start]] > 1:
if text[new_start] in count_char:
count_char[text[new_start]] -= 1
new_start += 1
if end < start or (new_end - new_start) < (end - start):
end = new_end
start = new_start
    return text[start: end + 1] if end >= start else None | 4682a01b1a4331dbada7a234c908d1c53639e69a | 9,053
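# Illustrative usage sketch (added; assumes shortest_substring_containing_characters above).
# Classic minimum-window example: the smallest window of "figehaeci" covering {'a','e','i'} is "aeci".
assert shortest_substring_containing_characters("figehaeci", {"a", "e", "i"}) == "aeci"
assert shortest_substring_containing_characters("xyz", {"a"}) is None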
import numpy as np
def refine_grid(
grid,
cb,
grid_additions=(50, 50),
ntrail=2,
blurs=((), ()),
metric=None,
atol=None,
rtol=None,
extremum_refinement=None,
snr=False,
):
"""Refines an existing grid by adding points to it.
Parameters
----------
grid : array
cb : callbable
Function to be evaluated (note that noise is handled poorly).
grid_additions : iterable of ints (even numbers)
Sequence specifying how many gridpoints to add each time.
ntrail : int (>= 2)
Number of points to include in the look-ahead extrapolation.
blurs : pair of iterables of ints (of same length)
Blur fractions of absolute residuals to neighbors.
atol : float
Absolute tolerance to be fulfilled by all absolute residuals for early exit.
rtol : float
Relative tolerance to be fulfilled by all absolute residuals for early exit.
extremum_refinement : locator (callable), n (int), predicate (callable)
Between each grid addition a callable for locating the extremum (e.g. np.argmax)
can be evaluated. The integer specifies how many gridpoints that should be inserted
on each side (one side if on boundary) of the extremum.
snr : bool
Use signal-to-noise ratio the lower the grid-addition-weight of potential noise.
Returns
-------
(grid, errors)
"""
for na in grid_additions:
if (na % 2) != 0:
raise ValueError("Need even number of grid points for each addition")
if extremum_refinement == "max":
extremum_refinement = (np.argmax, 1, lambda y, i: True)
elif extremum_refinement == "min":
extremum_refinement = (np.argmin, 1, lambda y, i: True)
def add_to(adds, grd, res, ys):
na = np.sum(adds)
if na == 0:
return grd, res, ys
nextresults = np.empty(grd.size + na, dtype=object)
nextresults[0] = res[0]
nexty = np.empty(grd.size + na)
nexty[0] = ys[0]
nextgrid = np.empty(grd.size + na)
nextgrid[0] = grd[0]
ptr = 1
yslices = []
for gi, nloc in enumerate(adds):
nextgrid[ptr : ptr + nloc + 1] = np.linspace(
grd[gi], grd[gi + 1], 2 + nloc
)[1:]
nextresults[ptr + nloc] = res[gi + 1]
nexty[ptr + nloc] = ys[gi + 1]
if nloc > 0:
yslices.append(slice(ptr, ptr + nloc))
ptr += nloc + 1
newresults = cb(np.concatenate([nextgrid[yslc] for yslc in yslices]))
newy = (
newresults if metric is None else np.array([metric(r) for r in newresults])
)
ystart, ystop = 0, 0
for yslc in yslices:
ystop += yslc.stop - yslc.start
nextresults[yslc] = newresults[ystart:ystop]
nexty[yslc] = newy[ystart:ystop]
ystart = ystop
return nextgrid, nextresults, nexty
results = cb(grid)
y = np.array(
results if metric is None else [metric(r) for r in results], dtype=np.float64
)
for na in grid_additions:
if extremum_refinement:
extremum_cb, extremum_n, predicate_cb = extremum_refinement
argext = extremum_cb(y)
if predicate_cb(y, argext):
additions = np.zeros(grid.size - 1, dtype=int)
if argext > 0: # left of
additions[argext - 1] = extremum_n
elif argext < grid.size - 1: # right of
additions[argext] = extremum_n
grid, results, y = add_to(additions, grid, results, y)
additions = np.zeros(grid.size - 1, dtype=int)
done = True if atol is not None or rtol is not None else False
slcs, errs = [], []
for direction in ("fw", "bw"):
est, slc = interpolate_ahead(grid, y, ntrail, direction)
err = np.abs(y[slc] - est)
if atol is not None:
done = done and np.all(err < atol)
if rtol is not None:
done = done and np.all(err / y[slc] < rtol)
slcs.append(slc)
errs.append(err)
if snr:
all_errs = np.array(
[[0.0] * ntrail + errs[0].tolist(), errs[1].tolist() + [0.0] * ntrail]
)
min__max = np.amin(all_errs, axis=0) / np.amax(all_errs, axis=0)
dgrid = np.diff(grid)
delta = np.empty_like(grid)
delta[0] = dgrid[0] ** -2
delta[-1] = dgrid[-1] ** -2
delta[1:-1] = 1 / (dgrid[:-1] * dgrid[1:])
lndelta = np.log(delta)
normlndelta = lndelta - np.max(lndelta)
for i in range(2):
errs[i] *= (1.0 + 1e-8) - min__max[slcs[i]]
errs[i] *= np.exp(normlndelta[slcs[i]])
for direction, blur, slc, err in zip(("fw", "bw"), blurs, slcs, errs):
for ib, b in enumerate(blur, 1):
blur_slices = (slice(ib, None), slice(None, -ib))
err[blur_slices[direction == "bw"]] += (
b * err[blur_slices[direction == "fw"]]
)
rerr = np.array(np.round(err * na / 2 / np.sum(err)), dtype=int)
delta = np.sum(rerr) - na // 2
if delta == 0:
pass
else:
sorted_indices = np.argsort(rerr)
for i in sorted_indices[-abs(delta) :]:
rerr[i] += 1 if delta < 0 else -1
if np.sum(rerr) - na // 2:
raise ValueError("Balancing failed.")
additions[
slice(ntrail - 1, None)
if direction == "fw"
else slice(None, 1 - ntrail)
] += rerr
grid, results, y = add_to(additions, grid, results, y)
if done:
break
return grid, results | c84a365bcc271622fd49a01d89303aa2adb1c624 | 9,054 |
from datetime import datetime, timedelta
def last_week(today: datetime=None, tz=None):
"""
Returns last week begin (inclusive) and end (exclusive).
:param today: Some date (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = today - timedelta(weeks=1, days=today.weekday())
begin = datetime(year=begin.year, month=begin.month, day=begin.day)
return localize_time_range(begin, begin + timedelta(days=7), tz) | a210707e2a479fe4e8b98a137c0ade684d4dd6da | 9,055 |
import pymel.core as pm  # assumed: pm is Maya's PyMEL binding providing textField/error
def get_velocity_limits():
    """
    Read the per-axis velocity limits (a1-a6) from the 't_A<n>vel' UI text fields
    and return them as a dict of floats keyed by 'a1'..'a6'.
    """
velocity_limits = {}
for i in range(6):
try:
velocity_limits['a{}'.format(i+1)] = float(pm.textField(
't_A{}vel'.format(i+1),
q=True,
text=True))
except ValueError:
pm.error('Robot velocity limits must be floats')
return velocity_limits | 68f58ed715a39478d119af1e1aabe54fa7ec6094 | 9,056 |
def decode_item_length(encoded_data: Bytes) -> int:
"""
Find the length of the rlp encoding for the first object in the
encoded sequence.
Here `encoded_data` refers to concatenation of rlp encoding for each
item in a sequence.
NOTE - This is a helper function not described in the spec. It was
    introduced because the spec doesn't discuss decoding the RLP encoded
    data.
Parameters
----------
encoded_data :
RLP encoded data for a sequence of objects.
Returns
-------
rlp_length : `int`
"""
# Can't decode item length for empty encoding
ensure(len(encoded_data) > 0)
first_rlp_byte = Uint(encoded_data[0])
# This is the length of the big endian representation of the length of
# rlp encoded object byte stream.
length_length = Uint(0)
decoded_data_length = 0
# This occurs only when the raw_data is a single byte whose value < 128
if first_rlp_byte < 0x80:
# We return 1 here, as the end formula
# 1 + length_length + decoded_data_length would be invalid for
# this case.
return 1
# This occurs only when the raw_data is a byte stream with length < 56
# and doesn't fall into the above cases
elif first_rlp_byte <= 0xB7:
decoded_data_length = first_rlp_byte - 0x80
# This occurs only when the raw_data is a byte stream and doesn't fall
# into the above cases
elif first_rlp_byte <= 0xBF:
length_length = first_rlp_byte - 0xB7
ensure(length_length < len(encoded_data))
# Expectation is that the big endian bytes shouldn't start with 0
# while trying to decode using RLP, in which case is an error.
ensure(encoded_data[1] != 0)
decoded_data_length = Uint.from_be_bytes(
encoded_data[1 : 1 + length_length]
)
# This occurs only when the raw_data is a sequence of objects with
# length(concatenation of encoding of each object) < 56
elif first_rlp_byte <= 0xF7:
decoded_data_length = first_rlp_byte - 0xC0
# This occurs only when the raw_data is a sequence of objects and
# doesn't fall into the above cases.
elif first_rlp_byte <= 0xFF:
length_length = first_rlp_byte - 0xF7
ensure(length_length < len(encoded_data))
# Expectation is that the big endian bytes shouldn't start with 0
# while trying to decode using RLP, in which case is an error.
ensure(encoded_data[1] != 0)
decoded_data_length = Uint.from_be_bytes(
encoded_data[1 : 1 + length_length]
)
return 1 + length_length + decoded_data_length | d005b8050abaaba76bd5d3a24419f86c462af2b2 | 9,057 |
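# Illustrative examples (added; decode_item_length depends on the execution-specs helpers
# `Uint`, `Bytes` and `ensure`, so these are shown as expected behaviour rather than a
# standalone script):
#
#     decode_item_length(b"\x05")              # -> 1  (single byte < 0x80)
#     decode_item_length(b"\x83dog")           # -> 4  (0x83 => 3 payload bytes + 1 prefix byte)
#     decode_item_length(b"\xc3\x01\x02\x03")  # -> 4  (list prefix 0xc3 => 3 payload bytes)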
def pxor(a1, a2, fmt=None):
"""Bitwise XOR"""
return c2repr(_inconv(a1) ^ _inconv(a2), fmt) | a65ada1901fc5bfa202af5128c3e5b6e54d5f6dc | 9,058 |
import numpy as np
from typing import Tuple
def milestone_2_test_1_initial_val(lattice_grid_shape: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""
Return initial conditions
Args:
lattice_grid_shape: lattice grid [lx, ly]
Returns:
density with 0.5, but one peak in the middle, velocities 0
"""
density = np.ones(lattice_grid_shape) * 0.5
    density[lattice_grid_shape[0] // 2, lattice_grid_shape[1] // 2] = 0.6  # integer indices required
velocity = np.ones(lattice_grid_shape) * 0.0
return density, velocity | 89d6ed57e93859182a92946e94adc2d26631f6e3 | 9,059 |
def test_element_html_call_get_attribute(monkeypatch, browser_driver):
"""Calls el_or_xpath WebElement attr get_attribute"""
called = []
class FakeWebElement:
def get_attribute(self, val):
called.append(('get_attribute', val))
return 42
@browser_driver.register
class FakeDriver:
pass
# This is needed to pass type checks in element_html()
monkeypatch.setattr(core, 'WebElement', FakeWebElement)
b = Browser(FakeDriver())
fake_el = FakeWebElement()
retval = b.element_html(fake_el, core.HTMLProperty.outer)
assert retval == 42
assert called == [
('get_attribute', core.HTMLProperty.outer.value)
] | 7b3bcc3ba4a8c030b15649b240f75bf9bed71570 | 9,060 |
import time
def moving_dictators(session, system_ids):
"""
Show newly controlling dictators in the last 5 days.
Show all controlling dictators in monitored systems.
Subqueries galore, you've been warned.
Returns: A list of messages to send.
"""
gov_dic = session.query(Government.id).\
filter(Government.text.in_(["Anarchy", "Dictatorship"])).\
scalar_subquery()
control_state_id = session.query(PowerState.id).\
filter(PowerState.text == "Control").\
scalar_subquery()
current = sqla_orm.aliased(FactionState)
pending = sqla_orm.aliased(FactionState)
sys = sqla_orm.aliased(System)
sys_control = sqla_orm.aliased(System)
dics = session.query(Influence, sys.name, Faction.name, Government.text,
current.text, pending.text,
sqla.func.ifnull(sys_control.name, 'N/A').label('control')).\
join(sys, Influence.system_id == sys.id).\
join(Faction, Influence.faction_id == Faction.id).\
join(Government, Faction.government_id == Government.id).\
join(current, Influence.state_id == current.id).\
join(pending, Influence.pending_state_id == pending.id).\
outerjoin(
sys_control, sqla.and_(
sys_control.power_state_id == control_state_id,
sys_control.dist_to(sys) < 15
)
).\
filter(Influence.system_id.in_(system_ids),
Government.id.in_(gov_dic)).\
order_by('control', sys.name).\
all()
look_for = [sqla.and_(InfluenceHistory.system_id == inf[0].system_id,
InfluenceHistory.faction_id == inf[0].faction_id)
for inf in dics]
time_window = time.time() - (60 * 60 * 24 * 2)
inf_history = session.query(InfluenceHistory).\
filter(sqla.or_(*look_for)).\
filter(InfluenceHistory.updated_at >= time_window).\
order_by(InfluenceHistory.system_id, InfluenceHistory.faction_id,
InfluenceHistory.updated_at.desc()).\
all()
pair_hist = {}
for hist in inf_history:
key = "{}_{}".format(hist.system_id, hist.faction_id)
pair_hist[key] = hist
lines = [["Control", "System", "Faction", "Gov", "Date",
"Inf", "Inf (2 days ago)", "State", "Pending State"]]
for dic in dics:
key = "{}_{}".format(dic[0].system_id, dic[0].faction_id)
try:
lines += [[dic[-1], dic[1][:16], dic[2][:16], dic[3][:3],
dic[0].short_date, "{:5.2f}".format(round(dic[0].influence, 2)),
"{:5.2f}".format(round(pair_hist[key].influence, 2)), dic[-3], dic[-2]]]
except KeyError:
lines += [[dic[-1], dic[1][:16], dic[2][:16], dic[3][:3],
dic[0].short_date, "{:5.2f}".format(round(dic[0].influence, 2)), "N/A",
dic[-3], dic[-2]]]
prefix = "**\n\nInf Movement Anarchies/Dictators**)\n"
prefix += "N/A: Means no previous information, either newly expanded to system or not tracking.\n"
return cog.tbl.format_table(lines, header=True, prefix=prefix) | 9d9808d608190dae0a9f57980312e2ae830c492c | 9,061 |
def get_alt_for_q_with_constant_mach(q, mach, tol=5., SI=False, nmax=20):
# type: (float, float, float, bool, int) -> float
"""
Gets the altitude associated with a dynamic pressure.
Parameters
----------
q : float
the dynamic pressure lb/ft^2 (SI=Pa)
mach : float
the mach to hold constant
tol : float; default=5.
tolerance in feet/meters
SI : bool
should SI units be used; default=False
Returns
-------
alt : float
the altitude in ft (SI=m)
"""
pressure = 2 * q / (1.4 * mach ** 2) # gamma = 1.4
alt = get_alt_for_pressure(pressure, tol=tol, SI=SI, nmax=nmax)
return alt | f9286d7f742a8e8e3f25d63210180dbd7bc2fcc7 | 9,062 |
import numpy as np
def addMetadataFlags(metadataChunk, numberOfMetadataChunks):
    """Prepends a binary flag giving the number of metadata chunks this upload has (uint8).
Arguments:
metadataChunk {bytes} -- First metadata chunk already encrypted, but before signing.
numberOfMetadataChunks {int} -- Self-explanatory.
Returns:
bytes -- Metadata chunk ready to be signed.
"""
#pylint: disable=E1111
numberFlag = np.uint8(numberOfMetadataChunks).tobytes()
fullMetadataChunk = b"".join([numberFlag, metadataChunk])
return fullMetadataChunk | aeaefd8e1cd62524d435ee95bc272a9a676680c0 | 9,063 |
def table(a):
"""get tabular view of obj, if available, else return obj"""
if misc.istablarray(a):
return a.__view__('table')
return a | e04b53f40203fbeeb3104f5e46bab87ab3304269 | 9,064 |
def parse_quadrupole(line):
"""
Quadrupole (type 1)
V1: zedge
V2: quad gradient (T/m)
V3: file ID
If > 0, then include fringe field (using Enge function) and
V3 = effective length of quadrupole.
V4: radius (m)
V5: x misalignment error (m)
V6: y misalignment error (m)
V7: rotation error x (rad)
V8: rotation error y (rad)
V9: rotation error z (rad)
If V9 != 0, skew quadrupole
V10: rf quadrupole frequency (Hz)
V11: rf quadrupole phase (degree)
"""
v = v_from_line(line)
d={}
d['zedge'] = float(v[1])
d['b1_gradient'] = float(v[2])
if float(v[3]) > 0:
d['L_effective'] = float(v[3])
else:
d['file_id'] = int(v[3])
d['radius'] = float(v[4])
d2 = parse_misalignments(v[5:10])
d.update(d2)
if len(v) > 11:
d['rf_frequency'] = float(v[10])
d['rf_phase_deg'] = float(v[11])
return(d) | 2e9748fb0eabe51383fcb1ff47a7278dda622e44 | 9,065 |
def cases_vides(pave):
"""fonction qui cherche toutes les cases vides ayant des cases adjacentes
pleines dans un pavé (où pavé est un tableau de tuiles ou de cases vides)
retourne le tableau contenant les positions de ces cases vides et les
cases adjacentes en fonction de leur position"""
result = []
for i in range(len(pave)):
for j in range(len(pave)):
if pave[i][j] == None:
position = Position((i, j), None, None, None, None)
if is_in_array(i + 1, j, pave) and pave[i + 1][j] != None:
position.Bot = pave[i + 1][j]
if is_in_array(i - 1, j, pave) and pave[i - 1][j] != None:
position.Top = pave[i - 1][j]
if is_in_array(i, j + 1, pave) and pave[i][j + 1] != None:
position.Right = pave[i][j + 1]
if is_in_array(i, j - 1, pave) and pave[i][j - 1] != None:
position.Left = pave[i][j - 1]
if position.Top != None or position.Bot != None or position.Left != None or position.Right != None:
result.append(position)
return result | 2d2de1651f000f48ab32e484f3f6b465231248b5 | 9,066 |
import numpy as np
def _create_scalar_tensor(vals, tensor=None):
"""Create tensor from scalar data"""
if not isinstance(vals, (tuple, list)):
vals = (vals,)
return _create_tensor(np.array(vals), tensor) | ef41eabc66eda8739a78931d53ccc6feb8dfc6bb | 9,067 |
import importlib.util
def is_importable(name):
""" Determines if a given package name can be found.
:param str name: The name of the pacakge
:returns: True if the package can be found
:rtype: bool
"""
return bool(importlib.util.find_spec(name)) | 548044b06d250af7f49dc3c9b4144490a5bbcc83 | 9,068 |
def make_pipeline(*steps, **kwargs):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Parameters
----------
*steps : list of estimators
A list of estimators.
memory : None, str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
See Also
--------
imblearn.pipeline.Pipeline : Class for creating a pipeline of
transforms with a final estimator.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
... # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
memory = kwargs.pop("memory", None)
verbose = kwargs.pop('verbose', False)
if kwargs:
raise TypeError(
'Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0])
)
return Pipeline(skpipeline._name_estimators(steps),
memory=memory, verbose=verbose) | a036c345208333b6f6d9d33998d06b282c9aa711 | 9,069 |
import logging
def say_hello(name):
"""
Log client's name which entered our application and send message to it
"""
logging.info('User %s entered', name)
return 'Hello {}'.format(name) | b79865cca34d1430bf47afabf7c96741d59ac560 | 9,070 |
import numpy
def dual_edges_2(vertices):
"""
Compute the dual edge vectors of a triangle, expressed in the
triangle plane orthonormal basis.
:param vertices: The triangle vertices (3 by n matrix with the vertices as rows (where n is the dimension of the
space)).
:returns: The triangle dual edge vectors (3 by 2 matrix with the coordinates for edge i in row i).
:rtype: :class:`Numpy array <numpy.ndarray>`
"""
t = dual_edges(vertices)
t2 = numpy.zeros((3, 2))
for i in range(3):
t2[i] = in_triangleplane_coords(vertices, t[i])
return t2 | 64ff173ef00dc4d916f00f67c7a35da25d81b535 | 9,071 |
def merge_dicts(dictionaries):
"""Merges multiple separate dictionaries into a single dictionary.
Parameters
----------
dictionaries : An iterable container of Python dictionaries.
Returns
-------
merged : A single dictionary that represents the result of merging the all the
dicts in ``dictionaries``.
Example
-------
The primary purpose of this function is to create a single dictionary
by combining multiple singleton dictionaries, as shown in the following example:
>>> dicts = [{'a': 1}, {'b': 2}, {'c': 3}]
>>> eb.merge_dicts(dicts)
{'a': 1, 'c': 3, 'b': 2}
"""
merged = dictionaries[0].copy()
for i in range(1, len(dictionaries)):
merged.update(dictionaries[i])
return merged | 1a2b5f3c539937e2e27a55ce3914f7368f0a7296 | 9,072 |
from typing import Union
from typing import Callable
def noise_distribution_to_cost_function(
noise_distribution: Union[str, Callable]
) -> Callable[[str], str]:
"""
Parse noise distribution string to a cost function definition amici can
work with.
The noise distributions listed in the following are supported. :math:`m`
denotes the measurement, :math:`y` the simulation, and :math:`\\sigma` a
distribution scale parameter
(currently, AMICI only supports a single distribution parameter).
- `'normal'`, `'lin-normal'`: A normal distribution:
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma}\\
exp\\left(-\\frac{(m-y)^2}{2\\sigma^2}\\right)
- `'log-normal'`: A log-normal distribution (i.e. log(m) is
normally distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma m}\\
exp\\left(-\\frac{(\\log m - \\log y)^2}{2\\sigma^2}\\right)
- `'log10-normal'`: A log10-normal distribution (i.e. log10(m) is
normally distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma m \\log(10)}\\
exp\\left(-\\frac{(\\log_{10} m - \\log_{10} y)^2}{2\\sigma^2}\\right)
- `'laplace'`, `'lin-laplace'`: A laplace distribution:
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{2\\sigma}
\\exp\\left(-\\frac{|m-y|}{\\sigma}\\right)
- `'log-laplace'`: A log-Laplace distribution (i.e. log(m) is Laplace
distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{2\\sigma m}
\\exp\\left(-\\frac{|\\log m - \\log y|}{\\sigma}\\right)
- `'log10-laplace'`: A log10-Laplace distribution (i.e. log10(m) is
Laplace distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{2\\sigma m \\log(10)}
\\exp\\left(-\\frac{|\\log_{10} m - \\log_{10} y|}{\\sigma}\\right)
- `'binomial'`, `'lin-binomial'`: A (continuation of a discrete) binomial
distribution, parameterized via the success probability
:math:`p=\\sigma`:
.. math::
\\pi(m|y,\\sigma) = \\operatorname{Heaviside}(y-m) \\cdot
\\frac{\\Gamma(y+1)}{\\Gamma(m+1) \\Gamma(y-m+1)}
\\sigma^m (1-\\sigma)^{(y-m)}
- `'negative-binomial'`, `'lin-negative-binomial'`: A (continuation of a
      discrete) negative binomial distribution, with `mean = y`,
parameterized via success probability `p`:
.. math::
\\pi(m|y,\\sigma) = \\frac{\\Gamma(m+r)}{\\Gamma(m+1) \\Gamma(r)}
(1-\\sigma)^m \\sigma^r
where
.. math::
r = \\frac{1-\\sigma}{\\sigma} y
The distributions above are for a single data point.
For a collection :math:`D=\\{m_i\\}_i` of data points and corresponding
simulations :math:`Y=\\{y_i\\}_i` and noise parameters
:math:`\\Sigma=\\{\\sigma_i\\}_i`, AMICI assumes independence,
i.e. the full distributions is
.. math::
\\pi(D|Y,\\Sigma) = \\prod_i\\pi(m_i|y_i,\\sigma_i)
AMICI uses the logarithm :math:`\\log(\\pi(m|y,\\sigma)`.
In addition to the above mentioned distributions, it is also possible to
pass a function taking a symbol string and returning a log-distribution
string with variables '{str_symbol}', 'm{str_symbol}', 'sigma{str_symbol}'
for y, m, sigma, respectively.
:param noise_distribution: An identifier specifying a noise model.
Possible values are
{`'normal'`, `'lin-normal'`, `'log-normal'`, `'log10-normal'`,
`'laplace'`, `'lin-laplace'`, `'log-laplace'`, `'log10-laplace'`,
`'binomial'`, `'lin-binomial'`, `'negative-binomial'`,
`'lin-negative-binomial'`, `<Callable>`}
For the meaning of the values see above.
:return: A function that takes a strSymbol and then creates a cost
function string (negative log-likelihood) from it, which can be
sympified.
"""
if isinstance(noise_distribution, Callable):
return noise_distribution
if noise_distribution in ['normal', 'lin-normal']:
y_string = '0.5*log(2*pi*{sigma}**2) + 0.5*(({y} - {m}) / {sigma})**2'
elif noise_distribution == 'log-normal':
y_string = '0.5*log(2*pi*{sigma}**2*{m}**2) ' \
'+ 0.5*((log({y}) - log({m})) / {sigma})**2'
elif noise_distribution == 'log10-normal':
y_string = '0.5*log(2*pi*{sigma}**2*{m}**2*log(10)**2) ' \
'+ 0.5*((log({y}, 10) - log({m}, 10)) / {sigma})**2'
elif noise_distribution in ['laplace', 'lin-laplace']:
y_string = 'log(2*{sigma}) + Abs({y} - {m}) / {sigma}'
elif noise_distribution == 'log-laplace':
y_string = 'log(2*{sigma}*{m}) + Abs(log({y}) - log({m})) / {sigma}'
elif noise_distribution == 'log10-laplace':
y_string = 'log(2*{sigma}*{m}*log(10)) ' \
'+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}'
elif noise_distribution in ['binomial', 'lin-binomial']:
# Binomial noise model parameterized via success probability p
y_string = '- log(Heaviside({y} - {m})) - loggamma({y}+1) ' \
'+ loggamma({m}+1) + loggamma({y}-{m}+1) ' \
'- {m} * log({sigma}) - ({y} - {m}) * log(1-{sigma})'
elif noise_distribution in ['negative-binomial', 'lin-negative-binomial']:
# Negative binomial noise model of the number of successes m
# (data) before r=(1-sigma)/sigma * y failures occur,
# with mean number of successes y (simulation),
# parameterized via success probability p = sigma.
r = '{y} * (1-{sigma}) / {sigma}'
y_string = f'- loggamma({{m}}+{r}) + loggamma({{m}}+1) ' \
f'+ loggamma({r}) - {r} * log(1-{{sigma}}) ' \
f'- {{m}} * log({{sigma}})'
else:
raise ValueError(
f"Cost identifier {noise_distribution} not recognized.")
def nllh_y_string(str_symbol):
y, m, sigma = _get_str_symbol_identifiers(str_symbol)
return y_string.format(y=y, m=m, sigma=sigma)
return nllh_y_string | d26ae31211ab5a9fae2b350391ab2a835ba02758 | 9,073 |
from datetime import datetime
from serde import SerdeSkip  # assumed: SerdeSkip is provided by the pyserde package
def serializer(cls, o):
"""
Custom class level serializer.
"""
# You can provide a custom serialize/deserialize logic for certain types.
if cls is datetime:
return o.strftime('%d/%m/%y')
# Raise SerdeSkip to tell serde to use the default serializer/deserializer.
else:
raise SerdeSkip() | 6e9bfbb83ede2c2da412b70741d793c6e24e05ef | 9,074 |
from optparse import OptionParser, OptionError
def parse_args():
""" parse command-line arguments """
usage = """Usage: bcfg2_svnlog.py [options] -r <revision> <repos>"""
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose", help="Be verbose", action="count")
parser.add_option("-c", "--config", help="Config file",
default="/etc/bcfg2_svnlog.conf")
parser.add_option("-r", "--rev", help="Revision")
parser.add_option("--stdout", help="Print log message to stdout")
try:
(options, args) = parser.parse_args()
except OptionError:
parser.print_help()
raise SystemExit(1)
if not len(args):
parser.print_help()
raise SystemExit(1)
get_logger(options.verbose)
return (options, args.pop()) | 30ac6035e375b692a516903055b7916a601e98a5 | 9,075 |
from numpy import array, int32, sqrt, sum  # numpy versions needed: astype(), int32 and sum(..., axis=0)
def compute_com(kpt_ids, pose_keypoints):
"""Computes center of mass from available points for each pose.
Requires at least one arm (shoulder, elbow, wrist), neck and hips.
Required keypoints to return result: at least one arm with hip, neck and [nose OR ear]
    :param kpt_ids: IDs of keypoints in pose_keypoints. Corresponds to kpt_names.
:param pose_keypoints: keypoints for parts of a pose. All types are in kpt_names.
:return COM/BOS tuple: tuple of main center of mass' x,y coordinates (ndarray), segment COMs (ndarray),
BOS coordinates (list of list of int)
"""
C_pts = [] # minor center of mass points
BOS = [[-1, -1], [-1, -1]] # base of support
COM = array([-1, -1]).astype(int32) # final center of mass
# legs are 3.5 to 4 heads
# 25 and 20: 20 front, 5 back
# Find length from nose/ears to neck and multiply 0.5 for front foot, 0.14 for back foot.
## Heuristics
no_right = False
no_left = False
for r_id in right_profile:
if r_id not in kpt_ids:
no_right = True
break
for l_id in left_profile:
if l_id not in kpt_ids:
no_left = True
break
face_id = -1
for f_id in face_profile:
if f_id in kpt_ids:
face_id = f_id
break
if face_id == -1:
return (COM, array(C_pts), BOS)
elif no_right and no_left:
return (COM, array(C_pts), BOS)
## Transformation
"""Two scenarios
(1) Front/Back of body: do nothing
(2) Side of body: copy point to side if needed
"""
if not no_right and no_left:
for indx in range(prof_len):
r_id = right_profile[indx]
l_id = left_profile[indx]
if pose_keypoints[l_id, 0] == -1:
pose_keypoints[l_id] = pose_keypoints[r_id]
elif no_right and not no_left:
for indx in range(prof_len):
r_id = right_profile[indx]
l_id = left_profile[indx]
if pose_keypoints[r_id, 0] == -1:
pose_keypoints[r_id] = pose_keypoints[l_id]
## Compute COM sections
face_pt = pose_keypoints[face_id]
neck_pt = pose_keypoints[1]
head_vector = (neck_pt - face_pt) # points down
nose_neck_len = sqrt(sum(head_vector * head_vector))
head_vector[0] = 0 # project to y-axis
# head_vector[1] = head_vector[1] * 1.5
r_sho_pt = pose_keypoints[2]
l_sho_pt = pose_keypoints[5]
upperRidge_pt = (r_sho_pt + l_sho_pt)/2
r_hip_pt = pose_keypoints[8]
l_hip_pt = pose_keypoints[11]
lowerRidge_pt = (r_hip_pt + l_hip_pt)/2
# Thorax COM
thorax_vector = (lowerRidge_pt - upperRidge_pt) * proximal_ratios[0]
C_pts.append((upperRidge_pt + thorax_vector).tolist())
# Upper Arms COM
r_elb_pt = pose_keypoints[3]
l_elb_pt = pose_keypoints[6]
r_uparm_vector = (r_sho_pt - r_elb_pt) * proximal_ratios[1]
l_uparm_vector = (l_sho_pt - l_elb_pt) * proximal_ratios[1]
C_pts.append((r_uparm_vector + r_elb_pt).tolist())
C_pts.append((l_uparm_vector + l_elb_pt).tolist())
# Forearms COM
r_forarm_vector = (r_elb_pt - pose_keypoints[4]) * proximal_ratios[2]
l_forarm_vector = (l_elb_pt - pose_keypoints[7]) * proximal_ratios[2]
C_pts.append((r_forarm_vector + pose_keypoints[4]).tolist())
C_pts.append((l_forarm_vector + pose_keypoints[7]).tolist())
# Thigh COM and Leg COM (OR) Total Leg COM (if pts missing)
# Right Side
if pose_keypoints[9,0] == -1: # missing leg estimation
r_total_leg_com = (head_vector * proximal_ratios[6]) + r_hip_pt
C_pts.append([0,0])
C_pts.append([0,0])
C_pts.append(r_total_leg_com.tolist())
BOS[0] = ((head_vector * 3.5) + r_hip_pt).tolist()
else:
r_knee_pt = pose_keypoints[9]
r_thigh_vector = (r_hip_pt - r_knee_pt) * proximal_ratios[3]
C_pts.append((r_thigh_vector + r_knee_pt).tolist())
if pose_keypoints[10, 0] == -1: # missing ankle estimation
r_leg_com = (head_vector * proximal_ratios[5]) + r_knee_pt
C_pts.append(r_leg_com.tolist())
BOS[0] = ((head_vector * 1.75) + r_knee_pt).tolist()
else:
r_ankle_pt = pose_keypoints[10]
r_leg_vector = (r_knee_pt - r_ankle_pt) * proximal_ratios[4]
C_pts.append((r_leg_vector + r_ankle_pt).tolist())
BOS[0] = r_ankle_pt.tolist()
C_pts.append([0,0])
# Left Side
if pose_keypoints[12,0] == -1: # missing leg estimation
l_total_leg_com = (head_vector * proximal_ratios[6]) + l_hip_pt
C_pts.append([0,0])
C_pts.append([0,0])
C_pts.append(l_total_leg_com.tolist())
BOS[1] = ((head_vector * 3.5) + l_hip_pt).tolist()
else:
l_knee_pt = pose_keypoints[12]
l_thigh_vector = (l_hip_pt - l_knee_pt) * proximal_ratios[3]
C_pts.append((l_thigh_vector + l_knee_pt).tolist())
if pose_keypoints[13, 0] == -1: # missing ankle estimation
l_leg_com = (head_vector * proximal_ratios[5]) + l_knee_pt
C_pts.append(l_leg_com.tolist())
BOS[1] = ((head_vector * 1.75) + l_knee_pt).tolist()
else:
l_ankle_pt = pose_keypoints[13]
l_leg_vector = (l_knee_pt - l_ankle_pt) * proximal_ratios[4]
C_pts.append((l_leg_vector + l_ankle_pt).tolist())
BOS[1] = l_ankle_pt.tolist()
C_pts.append([0,0])
## Compute COM from C_pts, and BOS
C_pts = array(C_pts, dtype=int32)
COM = sum(C_pts * mass_ratios, axis=0).astype(int32)
# was BOS[0][0] == BOS[1][0]
if no_left^no_right: # sagittal spreading; greedy approach
min1, min2, min3, min4 = [-1, -1, -1, -1]
if no_left: # facing towards right of image
min1 = round(BOS[0][0] - (nose_neck_len * 0.14)) # constants 0.14 and 0.5 based on my estimates
min2 = round(BOS[1][0] - (nose_neck_len * 0.14)) # of nose-neck length and foot length relative
max1 = round(BOS[0][0] + (nose_neck_len * 0.5)) # to ankle point.
max2 = round(BOS[1][0] + (nose_neck_len * 0.5))
else: # facing towards left of image
min1 = round(BOS[0][0] - (nose_neck_len * 0.5))
min2 = round(BOS[1][0] - (nose_neck_len * 0.5))
max1 = round(BOS[0][0] + (nose_neck_len * 0.14))
max2 = round(BOS[1][0] + (nose_neck_len * 0.14))
if min1 < min2:
BOS[0][0] = min1
else:
BOS[0][0] = min2
if max1 > max2:
BOS[1][0] = max1
else:
BOS[1][0] = max2
return (COM, C_pts, BOS) | 16e884ef76bdc21695349e6f0f9f9948426c5b8c | 9,076 |
import os
def certificate(cert_name):
"""Return the path to the PEM file with the given name."""
return os.path.join(os.path.dirname(__file__), 'lib', cert_name) | 5dc02c85158ae7b020f069976a581d41f31d338c | 9,077 |
def _MinimumLineCount(text: str, min_line_count: int) -> str:
"""Private implementation of minimum number of lines.
    Args:
        text: The source to verify the line count of.
        min_line_count: The minimum number of lines required.
    Returns:
        text: The unmodified input text.
    Raises:
        NoCodeException: If text has fewer than min_line_count lines.
"""
if len(text.strip().split("\n")) < min_line_count:
raise errors.NoCodeException
return text | 037400aed0503dabee61a8d5088ca2e4b3ab34a6 | 9,078 |
def RationalQuadratic1d(
grid,
corrlen,
sigma,
alpha,
prior=None,
mu_basis=None,
mu_hyper=None,
energy=0.99
) -> Formula:
"""Rational quadratic kernel formula
"""
kernel_kwargs = {
"corrlen": corrlen,
"sigma": sigma,
"alpha": alpha
}
_Formula = create_from_kernel1d(utils.rational_quadratic)
return _Formula(
grid=grid,
prior=prior,
mu_basis=mu_basis,
mu_hyper=mu_hyper,
energy=energy,
**kernel_kwargs
) | 56d61ef851ac5c84336f7f6bda19885d85b42b26 | 9,079 |
import numpy as np
import matplotlib.pyplot as plt
def plot_feature_importance(feature_keys, feature_importances, ax=None, **kwargs):
"""
Plot features importance after model training (typically from scikit-learn)
Parameters
----------
feature_keys: list of string
feature_importances: `numpy.ndarray`
ax: `matplotlib.pyplot.axes`
Returns
-------
ax
"""
ax = plt.gca() if ax is None else ax
sort_mask = np.argsort(feature_importances)[::-1]
ax.bar(np.array(feature_keys)[sort_mask], np.array(feature_importances)[sort_mask], **kwargs)
for t in ax.get_xticklabels():
t.set_rotation(45)
ax.set_title("Feature importances")
return ax | aa3a747002d7c82f91de52e011b269b105c4bb70 | 9,080 |
import numpy as np
from numpy.random import default_rng
def simulate_timestamps_till_horizon(mu, alpha, beta, Thorizon = 60, \
seed=None, node=None, output_rejected_data=False):
"""
Inputs:
    mu, alpha, beta are parameters of the intensity function of the Hawkes process (HP)
"""
#################
# Initialisation
#################
rng = default_rng(seed) # get instance of random generator
t = 0 # initialise current time to be 0
i = 0 # set event counter to be 0
    epsilon = 10**(-10) # small offset, commonly used in Hawkes process implementations
lambda_star = mu # upper bound at current time t = 0
ts = np.array([]); accepted_event_intensity = [lambda_star]
# containter for rejected time points and their correspodning intensities
rejected_points = []; rpy = []
# M_y stores upper bound of current times while M_x stores their x-values
M_x = []; M_y = []
#################
# Begin loop
#################
while(t < Thorizon):
previous_lambda_star = lambda_star; previous_t = t
# compute upper bound of intensity using intensity function
lambda_star = intensity_func(t+epsilon, ts, mu, alpha, beta)
u = rng.uniform(0,1) # draw a uniform random number between (0,1)
tau = -np.log(u)/lambda_star # sample inter-arrival time
t = t + tau # update current time by adding tau to current time
M_x += [previous_t,t]
M_y += [previous_lambda_star]
s = rng.uniform(0,1)# draw another standard uniform random number
# compute intensity function at current time t
lambda_t = intensity_func(t, ts, mu, alpha, beta)
if (t >= Thorizon):
break
##########################
## Rejection Sampling test
if s <= lambda_t/lambda_star:
            if node is not None:
                ts = np.append(ts, [float(t), np.array([node])])
            else:
                ts = np.append(ts, float(t))
accepted_event_intensity.append(lambda_t)
i += 1
else:
rejected_points += [t]
rpy += [lambda_t]
if output_rejected_data:
return ts, i, accepted_event_intensity, rejected_points, rpy
return ts | 6d9e7a7c747c7a07fe94069017b32a47e3d35ac2 | 9,081 |
import logging
import time
import torch
import datetime  # the module itself is needed for datetime.timedelta below
def jp_inference_on_dataset(model, data_loader, evaluator):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
Also benchmark the inference speed of `model.forward` accurately.
The model will be used in eval mode.
Args:
model (nn.Module): a module which accepts an object from
`data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want
to benchmark, but don't want to do any evaluation.
Returns:
The return value of `evaluator.evaluate()`
"""
num_devices = get_world_size()
logger = logging.getLogger(__name__)
logger.info("Start inference on {} images".format(len(data_loader)))
total = len(data_loader) # inference data loader must have a fixed length
if evaluator is None:
# create a no-op evaluator
evaluator = DatasetEvaluators([])
evaluator.reset()
num_warmup = min(5, total - 1)
start_time = time.perf_counter()
total_compute_time = 0
with inference_context(model), torch.no_grad():
for idx, inputs in enumerate(data_loader):
print("rank",comm.get_rank(),"is processing batch",idx)
if idx == num_warmup:
start_time = time.perf_counter()
total_compute_time = 0
start_compute_time = time.perf_counter()
outputs = model(inputs) #RUN THE MODEL!!!!!!!!!
if torch.cuda.is_available():
torch.cuda.synchronize()
total_compute_time += time.perf_counter() - start_compute_time
evaluator.process(inputs, outputs)
iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
seconds_per_img = total_compute_time / iters_after_start
if idx >= num_warmup * 2 or seconds_per_img > 5:
total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
log_every_n_seconds(
logging.INFO,
"Inference done {}/{}. {:.4f} s / img. ETA={}".format(
idx + 1, total, seconds_per_img, str(eta)
),
n=5,
)
# Measure the time only for this worker (before the synchronization barrier)
total_time = time.perf_counter() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
# NOTE this format is parsed by grep
logger.info(
"Total inference time: {} ({:.6f} s / img per device, on {} devices)".format(
total_time_str, total_time / (total - num_warmup), num_devices
)
)
total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
logger.info(
"Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)".format(
total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
)
)
results = evaluator.evaluate()
# An evaluator may return None when not in main process.
# Replace it by an empty dict instead to make it easier for downstream code to handle
if results is None:
results = {}
return results | e18113b4fc47bf48562bdee8dc8e4a2bdbe4c884 | 9,082 |
def boolToYes(b):
"""Convert a Boolean input into 'yes' or 'no'
Args:
b (bool): The Boolean value to be converted
Returns:
str: 'yes' if b is True, and 'no' otherwise.
"""
if b:
return "yes"
else:
return "no" | ff94b66b5a166592062bf1d5b286b425e7997304 | 9,083 |
import plotly.graph_objects as go
def top_symptoms(dic, title):
"""Find and plot top symptoms in the dictionary based on count
    Args:
        dic (dict): Dictionary containing text-count pairs
        title (str): Title of the pie chart
    Returns:
        list: keys of the top 5 symptoms, ordered by descending count
"""
assert isinstance(dic, dict) and len(dic) > 0, "dic is not a nonempty dictionary"
labels = []
sizes = []
counts = 0
top5 = sorted(dic, key=dic.get, reverse=True)[:5]
others = sorted(dic, key=dic.get, reverse=True)[5:]
for i in others:
counts += dic[i]
for i in top5:
labels.append(i)
sizes.append(dic[i])
labels.append("OTHER")
sizes.append(counts)
fig = go.Figure(data=[go.Pie(labels=labels, values=sizes, hole=0.3)])
fig.update_layout(
title=title,
template=None,
title_x=0.5,
width=1000,
height=900,
margin=dict(l=20, r=20, t=50, b=20),
legend=dict(font=dict(size=25, color="black")),
)
fig.show()
return top5 | 1acfcec04d2a5c11f7f1a4e90eb9142de042c875 | 9,084 |
from xarray import DataArray
def _calc_z(h: DataArray, zice: DataArray, zeta: DataArray,
s: DataArray, Cs: DataArray,
hc: float, Vtransform: int) -> DataArray:
"""
Calculate grid z-coord depth given water depth (h), iceshelf depth (zice),
sea surface (zeta), and vertical grid transformation parameters.
Inputs:
h, zice, zeta - bathymetry extracted by set_hzz
s, Cs, hc, Vtransform - ROMS grid transformation parameters
Output:
z - depth of rho/w points
"""
if Vtransform == 1:
z0 = hc*s + (h-zice-hc)*Cs
z = zeta*(1.0+z0/(h-zice)) + z0 - zice
elif Vtransform == 2:
z0 = (hc*s + (h-zice)*Cs) / (hc+h-zice)
        z = zeta + (zeta+h-zice)*z0 - zice
    else:
        raise ValueError("Vtransform must be 1 or 2, got {}".format(Vtransform))
    return z | 6580d3c2825cbea0bba33d03b2c0ad62bbd5b227 | 9,085
import tensorflow as tf  # written against the TF1.x API (tf.div)
def gap_loss(preds, D, A):
    """
    This function implements the loss function from Nazi, Azade & Hang, Will & Goldie, Anna & Ravi, Sujith & Mirhoseini, Azalia. (2019). GAP: Generalizable Approximate Graph Partitioning Framework.
Args:
preds (tensor(float)): output predited value, have size n x g
D (tensor(float)): degree of nodes, have size n x 1
A (tensor(bool)): adjacent matrix of graph, have size n x n
Returns:
float: the results of the loss function
"""
temp = tf.matmul(tf.transpose(preds), D)
temp = tf.div(preds, temp)
temp = tf.matmul(temp, tf.transpose(1-preds))
temp = tf.multiply(temp, A)
return tf.reduce_sum(temp) | 9418ee8bda3e7b1a5284c36412fefa158eec0f91 | 9,086 |
def number_of_hole(img, hole_img, hole_counter):
""" 判斷hole的數量去執行相對應的函式
0個hole執行zero_of_hole
1個hole執行one_of_hole
2個hole執行my_text.set("Answer : 8")
大於2個hole則執行my_text.set("Error : holes number = " + str(hole_counter) + "( > 2 )")) """
switcher = {
0:zero_of_hole,
1:one_of_hole,
        2:lambda x1, x2:my_text.set("Answer : 8") # parameters x1, x2 are unused; they only make this a callable like the others
}
    func = switcher.get(hole_counter, lambda x1, x2:my_text.set("Error : holes number = " + str(hole_counter) + "( > 2 )")) # parameters x1, x2 are unused; they only make this a callable like the others
return func(img, hole_img) | 583fd05b0f10e3ea1c7cee11bd416b8d41d7f840 | 9,087 |
def get_merged_by_value_coords(spans_value, digits=None):
"""returns adjacent spans merged if they have the same value. Assumes
[(start, end, val), ..] structure and that spans_value is sorted in
ascending order.
Arguments:
- digits: if None, any data can be handled and exact values are
compared. Otherwise values are rounded to that many digits.
"""
assert len(spans_value[0]) == 3, 'spans_value must have 3 records per row'
starts, ends, vals = zip(*spans_value)
indices_distinct_vals = get_run_start_indices(vals, digits=digits)
data = []
i = 0
for index, val in indices_distinct_vals:
start = starts[index]
end = ends[index]
prev_index = max(index-1, 0)
try:
data[-1][1] = ends[prev_index]
except IndexError:
pass
data.append([start, end, val])
if index < len(ends):
data[-1][1] = ends[-1]
return data | c186c503972b4b48e627c14df77bd5a780b59f5b | 9,088 |
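# Illustrative example (added; get_merged_by_value_coords relies on the module's
# get_run_start_indices helper, so this shows the expected result rather than a
# standalone script):
#
#     spans = [(0, 10, 1.0), (10, 20, 1.0), (20, 30, 2.0)]
#     get_merged_by_value_coords(spans)   # -> [[0, 20, 1.0], [20, 30, 2.0]]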
def vint_mask_for_length(length):
"""
Returns the bitmask for the first byte of a variable-length integer (used for element ID and size descriptors).
:arg length: the length of the variable-length integer
:type length: int
:returns: the bitmask for the first byte of the variable-length integer
:rtype: int
"""
return 0b10000000 >> (length - 1) | 92fe3cb0fa09713ff4b650349294a2b241bb3918 | 9,089 |
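# Illustrative usage sketch (added for clarity; uses only the function above):
assert vint_mask_for_length(1) == 0b10000000  # 1-byte vint: marker in the top bit
assert vint_mask_for_length(2) == 0b01000000  # 2-byte vint: marker shifted right once
assert vint_mask_for_length(4) == 0b00010000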
from itertools import tee
def parse(tokens):
"""
S-expr ::= ( S-expr* ) | AtomSymbol | ' S-expr
' S-expr = (quote S-expr)
"""
def _parse(tokens):
while True:
token = next(tokens)
if token == "(":
s_expr = []
while True:
tokens, tokens_clone = tee(tokens)
if next(tokens_clone) == ")":
next(tokens)
return tuple(s_expr)
s_expr.append(_parse(tokens))
elif token == ")" or token == "":
raise Exception("parse error")
elif token == "'":
return "quote", _parse(tokens)
else:
return token
s_expr = _parse(tokens)
if next(tokens) != "":
raise Exception("parse error")
return s_expr | 90c8e3cd8482899749d30d5344390cfd5f24989f | 9,090 |
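# Illustrative usage sketch (added; assumes the parse() above, which expects an
# iterator of tokens terminated by an empty string):
assert parse(iter(["(", "+", "1", "(", "*", "2", "3", ")", ")", ""])) == ("+", "1", ("*", "2", "3"))
assert parse(iter(["'", "x", ""])) == ("quote", "x")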
import numpy
import warnings
def preproc(raw,
dark=None,
flat=None,
solidangle=None,
polarization=None,
absorption=None,
mask=None,
dummy=None,
delta_dummy=None,
normalization_factor=1.0,
empty=None,
split_result=False,
variance=None,
dark_variance=None,
poissonian=False,
dtype=numpy.float32
):
"""Common preprocessing step for all integration engines
:param data: raw value, as a numpy array, 1D or 2D
:param mask: array non null where data should be ignored
:param dummy: value of invalid data
:param delta_dummy: precision for invalid data
:param dark: array containing the value of the dark noise, to be subtracted
:param flat: Array containing the flatfield image. It is also checked for dummies if relevant.
:param solidangle: the value of the solid_angle. This processing may be performed during the rebinning instead. left for compatibility
:param polarization: Correction for polarization of the incident beam
:param absorption: Correction for absorption in the sensor volume
:param normalization_factor: final value is divided by this
:param empty: value to be given for empty bins
:param split_result: set to true to separate signal from normalization and
return an array of float2, float3 (with variance) ot float4 (including counts)
:param variance: provide an estimation of the variance, enforce
split_result=True and return an float3 array with variance in second position.
:param dark_variance: provide an estimation of the variance of the dark_current,
enforce split_result=True and return an float3 array with variance in second position.
:param poissonian: set to "True" for assuming the detector is poissonian and variance = max(1, raw + dark)
:param dtype: dtype for all processing
All calculation are performed in single precision floating point (32 bits).
NaN are always considered as invalid values
if neither empty nor dummy is provided, empty pixels are 0.
Empty pixels are always zero in "split_result" mode.
When set to False, i.e the default, the pixel-wise operation is:
.. math::
I = \\frac{raw - dark}{flat \\cdot solidangle \\cdot polarization \\cdot absorption}
Invalid pixels are set to the dummy or empty value.
When split_result is set to True, each result is a float2
or a float3 (with an additional value for the variance) as such:
I = [:math:`raw - dark`, :math:`variance`, :math:`flat \\cdot solidangle \\cdot polarization \\cdot absorption`]
If split_result is 4, then the count of pixel is appended to the list, i.e. 1 or 0 for masked pixels
Empty pixels will have all their 2 or 3 or 4 values to 0 (and not to dummy or empty value)
If poissonian is set to True, the variance is evaluated as raw + dark, with a minimum of 1.
"""
if isinstance(dtype, str):
dtype = numpy.dtype(dtype).type
shape = raw.shape
out_shape = list(shape)
if split_result or (variance is not None) or poissonian:
if split_result == 4:
out_shape += [4]
elif (variance is not None) or poissonian:
out_shape += [3]
else:
out_shape += [2]
split_result = True
size = raw.size
if (mask is None) or (mask is False):
mask = numpy.zeros(size, dtype=bool)
else:
assert mask.size == size, "Mask array size is correct"
mask = numpy.ascontiguousarray(mask.ravel(), dtype=bool)
if (dummy is not None) and (delta_dummy is not None):
check_dummy = True
cdummy = dtype(dummy)
ddummy = dtype(delta_dummy)
elif (dummy is not None):
check_dummy = True
cdummy = dtype(dummy)
ddummy = 0.0
else:
check_dummy = False
cdummy = dtype(empty or 0.0)
ddummy = 0.0
signal = numpy.ascontiguousarray(raw.ravel(), dtype=dtype)
normalization = numpy.zeros_like(signal) + normalization_factor
if variance is not None:
variance = numpy.ascontiguousarray(variance.ravel(), dtype=dtype)
elif poissonian:
variance = numpy.maximum(1.0, signal) # this makes a copy
# runtime warning here
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (signal == cdummy)
else:
mask |= (abs(signal - cdummy) <= ddummy)
if dark is not None:
assert dark.size == size, "Dark array size is correct"
dark = numpy.ascontiguousarray(dark.ravel(), dtype=dtype)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (dark == cdummy)
else:
mask |= abs(dark - cdummy) < ddummy
signal -= dark
if poissonian:
variance += dark
elif dark_variance is not None:
variance += dark_variance
if flat is not None:
assert flat.size == size, "Flat array size is correct"
flat = numpy.ascontiguousarray(flat.ravel(), dtype=dtype)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (flat == cdummy)
else:
mask |= abs(flat - cdummy) <= ddummy
normalization *= flat
if polarization is not None:
assert polarization.size == size, "Polarization array size is correct"
normalization *= numpy.ascontiguousarray(polarization.ravel(), dtype=dtype)
if solidangle is not None:
assert solidangle.size == size, "Solid angle array size is correct"
normalization *= numpy.ascontiguousarray(solidangle.ravel(), dtype=dtype)
if absorption is not None:
assert absorption.size == size, "Absorption array size is correct"
normalization *= numpy.ascontiguousarray(absorption.ravel(), dtype=dtype)
mask |= numpy.logical_not(numpy.isfinite(signal))
mask |= numpy.logical_not(numpy.isfinite(normalization))
mask |= (normalization == 0)
if variance is not None:
mask |= numpy.logical_not(numpy.isfinite(variance))
if split_result:
result = numpy.zeros(out_shape, dtype=dtype)
signal[mask] = 0.0
normalization[mask] = 0.0
result[..., 0] = signal.reshape(shape)
if out_shape[-1] == 4:
if variance is not None:
variance[mask] = 0.0
result[..., 1] = variance.reshape(shape)
result[..., 2] = normalization.reshape(shape)
result[..., 3] = 1.0 - mask.reshape(shape)
elif variance is None:
            result[..., 1] = normalization.reshape(shape)
else:
variance[mask] = 0.0
result[..., 1] = variance.reshape(shape)
result[..., 2] = normalization.reshape(shape)
else:
result = signal / normalization
result[mask] = cdummy
result.shape = shape
return result | 9a21af39470b1f48c81d043a1d4a9ca045804093 | 9,091 |
import numpy as np
import scipy.optimize
def lB_2_T(lB, T0=298, sigma=4E-10, ret_res=False):
"""Solves for temperature at given Bjerrum length under condition from Adhikari et al. 2019 that lB/l = 1.2 at 298 K."""
def cond(T, lB, sigma=sigma):
"""condition function whose root gives the temperature T given Bjerrum length lB."""
return lB_fn(T, sigma=sigma) - lB
T = scipy.optimize.fsolve(cond, T0, args=(lB,))[0]
if ret_res:
res = np.abs(lB_fn(T, sigma=sigma) - lB)
return T, res
return T | 73d349d95cd69076874e7147280322535b6b1651 | 9,092 |
from typing import Iterable
from typing import Union
import dataclasses
def make_datacls(
cls_name: str,
fields: Iterable[Union[tuple[str, type], tuple[str, type, dataclasses.Field]]],
init: bool = True,
**kwargs,
) -> type:
"""
Return a new dataclass. This function wraps the Python dataclasses.make_dataclass
function, with the following changes to the generated __init__ method:
• initialization method only processes keyword arguments
• initialization method ignores unexpected keyword arguments
• fields (with default values or not) can be declared in any order
• Optional[...] fields default to None if no default value is specified
Keyword arguments are passed on to the dataclasses.make_dataclass function.
"""
dataclass = dataclasses.make_dataclass(
cls_name=cls_name,
fields=fields,
init=False,
**kwargs,
)
if init:
dataclass.__init__ = _datacls_init(dataclass)
return dataclass | d3797443212504605310ed75fbcb5ce37570b868 | 9,093 |
import numpy as np
def square_loss(X, y, theta, reg_beta=0.0):
"""Computes squared loss and gradient.
Based on mean square margin loss.
X: (k, n) data items.
y: (k, 1) result (+1 or -1) for each data item in X.
theta: (n, 1) parameters.
reg_beta: optional regularization strength, for L2 regularization.
Returns (loss, dtheta) where loss is the aggregate numeric loss for this
theta, and dtheta is (n, 1) gradients for theta based on that loss.
Note: the mean (division by k) helps; otherwise, the loss is very large and
a tiny learning rate is required to prevent divergence in the beginning of
the search.
"""
k, n = X.shape
margin = y * X.dot(theta)
diff = margin - 1
loss = np.dot(diff.T, diff) / k + np.dot(theta.T, theta) * reg_beta / 2
dtheta = np.zeros_like(theta)
for j in range(n):
dtheta[j, 0] = (2 * np.dot((diff * y).T, X[:, j]) / k +
reg_beta * theta[j, 0])
return loss.flat[0], dtheta | 3a1cc74eed3abd9c3a7921c9ea02e2169594f504 | 9,094 |
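# Illustrative usage sketch (added; relies only on numpy and the square_loss above).
# With two items, two features and theta = 0, every margin is 0, so the mean squared
# margin loss is ((0-1)^2 + (0-1)^2) / 2 = 1.0 with no regularization.
loss, dtheta = square_loss(np.array([[1.0, 2.0], [3.0, 4.0]]),
                           np.array([[1.0], [-1.0]]),
                           np.zeros((2, 1)))
assert loss == 1.0 and dtheta.shape == (2, 1)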
from glob import glob
def open_mf_wrf_dataset(paths, chunks=None, compat='no_conflicts', lock=None,
preprocess=None):
"""Open multiple WRF files as a single WRF dataset.
Requires dask to be installed. Note that if your files are sliced by time,
certain diagnostic variable computed out of accumulated variables (e.g.
PRCP) won't be available, because not computable lazily.
This code is adapted from xarray's open_mfdataset function. The xarray
license is reproduced in the salem/licenses directory.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/\*.nc" or an
explicit list of files to open.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks`` .
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see
xarray's full documentation for more details.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
lock : False, True or threading.Lock, optional
This argument is passed on to :py:func:`dask.array.from_array`. By
default, a per-variable lock is used when reading data from netCDF
files with the netcdf4 and h5netcdf engines to avoid issues with
concurrent access when using dask's multithreaded backend.
Returns
-------
xarray.Dataset
"""
    if isinstance(paths, str):
paths = sorted(glob(paths))
if not paths:
raise IOError('no files to open')
# TODO: current workaround to dask thread problems
dask.set_options(get=dask.get)
if lock is None:
lock = _default_lock(paths[0], 'netcdf4')
datasets = [open_wrf_dataset(p, chunks=chunks or {}, lock=lock)
for p in paths]
file_objs = [ds._file_obj for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
combined = xr.auto_combine(datasets, concat_dim='time', compat=compat)
combined._file_obj = _MultiFileCloser(file_objs)
combined.attrs = datasets[0].attrs
# drop accumulated vars if needed (TODO: make this not hard coded)
vns = ['PRCP', 'PRCP_C', 'PRCP_NC']
vns = [vn for vn in vns if vn in combined.variables]
combined = combined.drop(vns)
return combined | 9cf95b6da852406b2b24862604cd357c01f88a93 | 9,095 |
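# Hedged usage sketch for open_mf_wrf_dataset (needs the salem helpers referenced above
# plus dask; the file pattern and variable name are illustrative):
# ds = open_mf_wrf_dataset('wrfout_d01_2008-*.nc', chunks={'time': 10})
# t2_mean = ds['T2'].mean(dim='time')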
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional
def parse_args_and_add_yaml_variables(parser: ArgumentParser,
yaml_config_file: Optional[Path] = None,
project_root: Optional[Path] = None,
fail_on_unknown_args: bool = False) -> ParserResult:
"""
Reads arguments from sys.argv, modifies them with secrets from local YAML files,
and parses them using the given argument parser.
:param project_root: The root folder for the whole project. Only used to access a private settings file.
:param parser: The parser to use.
:param yaml_config_file: The path to the YAML file that contains values to supply into sys.argv.
:param fail_on_unknown_args: If True, raise an exception if the parser encounters an argument that it does not
recognize. If False, unrecognized arguments will be ignored, and added to the "unknown" field of the parser result.
:return: The parsed arguments, and overrides
"""
settings_from_yaml = read_all_settings(yaml_config_file, project_root=project_root)
return parse_arguments(parser,
settings_from_yaml=settings_from_yaml,
fail_on_unknown_args=fail_on_unknown_args) | 7d4c560a4887afd432da13df1e839cada329dd5a | 9,096 |
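# Hedged usage sketch (ParserResult, read_all_settings and parse_arguments are project
# helpers assumed to live in the same module; the argument and YAML path are made up):
# parser = ArgumentParser()
# parser.add_argument("--learning_rate", type=float, default=1e-3)
# result = parse_args_and_add_yaml_variables(parser,
#                                            yaml_config_file=Path("train_variables.yml"),
#                                            fail_on_unknown_args=True)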
import tensorflow as tf
def load_graph(model_file):
    """Loads a serialized TensorFlow (1.x) GraphDef from file and returns it as a Graph."""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph | 41e097afd34631ce8b2b94c9a67121886a568ede | 9,097 |
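# Hedged usage sketch for load_graph (TensorFlow 1.x session API; the file name and
# tensor name are illustrative and depend on the exported model):
# graph = load_graph('frozen_inference_graph.pb')
# with tf.Session(graph=graph) as sess:
#     image_tensor = graph.get_tensor_by_name('image_tensor:0')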
def find_children(node, tag, xml_ns, ns_key):
"""
Finds the collection of children nodes
Parameters
----------
node : ElementTree.Element
tag : str
xml_ns : None|dict
ns_key : None|str
"""
if xml_ns is None:
return node.findall(tag)
elif ns_key is None:
return node.findall('default:{}'.format(tag), xml_ns)
else:
return node.findall('{}:{}'.format(ns_key, tag), xml_ns) | b51d9f588661c3f609dc53adaa328f974e17d5fb | 9,098 |
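# Self-contained example of find_children with and without a namespace mapping;
# the namespace URI below is made up for illustration.
import xml.etree.ElementTree as ET
root = ET.fromstring('<a xmlns:ns="http://example.com/ns"><ns:b/><ns:b/></a>')
ns_map = {'ns': 'http://example.com/ns'}
print(len(find_children(root, 'b', ns_map, 'ns')))   # -> 2 (explicit namespace key)
print(len(find_children(root, 'b', None, None)))     # -> 0 (unqualified tag does not match)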
import re
def normalize_string(string, ignore_spaces, ignore_punctuation):
    """Normalizes strings to prepare them for comparison."""
    string = string.upper()
    if ignore_punctuation:
        # keep only alphanumerics and whitespace
        string = re.sub(r"[^0-9a-z \n\r\t]", "", string, flags=re.I)
    if ignore_spaces:
        # drop all whitespace entirely
        string = re.sub(r"\s+", "", string)
    else:
string = string.strip()
string = re.sub(r"[ \n\r\t]+", " ", string)
return string | 31de2b9644eb0943470430c6c3f2ea8a94dfb3cf | 9,099 |
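# Quick self-contained check of normalize_string (requires only the re import above):
print(normalize_string("  Hello,   world! 42 ", ignore_spaces=False, ignore_punctuation=True))
# -> "HELLO WORLD 42"
print(normalize_string("a b\tc", ignore_spaces=True, ignore_punctuation=False))
# -> "ABC"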