content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
import pandas as pd
import numpy as np
from pandas import get_dummies
from numpy.linalg import lstsq
def demean_dataframe_two_cat(df_copy, consist_var, category_col, is_unbalance):
"""
Reference: Baltagi http://library.wbi.ac.id/repository/27.pdf page 176, equation (9.30)
:param df_copy: Dataframe
:param consist_var: List of columns to center on the fixed effects
:param category_col: List of fixed effects
:param is_unbalance: True if the panel is unbalanced; otherwise simple iterative demeaning is used
:return: Demeaned dataframe
"""
if is_unbalance:
# first determine which is uid or the category that has the most items
max_ncat = df_copy[category_col[0]].nunique()
max_cat = category_col[0]
for cat in category_col:
if df_copy[cat].nunique() >= max_ncat:
max_ncat = df_copy[cat].nunique()
max_cat = cat
min_cat = category_col.copy()
min_cat.remove(max_cat)
min_cat = min_cat[0]
df_copy.sort_values(by=[max_cat, min_cat], inplace=True)
# demean on the first category variable, max_cat
for consist in consist_var:
df_copy[consist] = df_copy[consist] - df_copy.groupby(max_cat)[consist].transform('mean')
dummies = get_dummies(df_copy[min_cat]) # time dummies
dummies[max_cat] = df_copy[max_cat]
dummies[min_cat] = df_copy[min_cat]
dummies[max_cat] = dummies[max_cat].apply(str)
dummies[min_cat] = dummies[min_cat].apply(str)
dummies.set_index([max_cat, min_cat], inplace = True)
group_mu = dummies.groupby(level=max_cat).transform("mean")
out = dummies - group_mu # q_delta_1 @ delta_2
e = df_copy[consist_var].values
d = out.values
resid = e - d @ lstsq(d, e, rcond=None)[0]
df_out = pd.DataFrame(data=resid, columns=consist_var)
df_out[max_cat] = df_copy[max_cat]
df_out[min_cat] = df_copy[min_cat]
else: # balance
for consist in consist_var:
for cat in category_col:
df_copy[consist] = df_copy[consist] - df_copy.groupby(cat)[consist].transform('mean')
df_out = df_copy
return df_out | a6a3f0bd56be214660eca857f0fb8630879bb2a8 | 22,731 |
import datetime
import time
def get_time_string(time_obj=None):
"""The canonical time string format (in UTC).
:param time_obj: an optional datetime.datetime or time.struct_time (defaults to
time.gmtime())
Note: Changing this function will change all times that this project uses
in the returned data.
"""
if isinstance(time_obj, datetime.datetime):
if time_obj.tzinfo:
offset = time_obj.tzinfo.utcoffset(time_obj)
utc_dt = time_obj + offset
return datetime.datetime.strftime(utc_dt, STRING_FORMAT)
return datetime.datetime.strftime(time_obj, STRING_FORMAT)
elif isinstance(time_obj, time.struct_time):
return time.strftime(STRING_FORMAT, time_obj)
elif time_obj is not None:
raise TypeError("get_time_string takes only a time_struct, none, or a "
"datetime. It was given a %s" % type(time_obj))
return time.strftime(STRING_FORMAT, time.gmtime()) | 73a623474e70850dc4194e2657b7e15aaa53996f | 22,732 |
def apply_activation_checkpointing_wrapper(
model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=lambda _: True
):
"""
Applies :func:`checkpoint_wrapper` to modules within `model` based on a user-defined
configuration. For each module within `model`, the `check_fn` is used to decide
whether `module` should be wrapped with :func:`checkpoint_wrapper` or not.
Note::
This function modifies `model` in place and replaces appropriate layers with
their checkpoint-wrapped modules.
Note::
This function will not wrap the overall root module. If this is needed, please directly use
:class:`CheckpointWrapper`.
Usage::
model = nn.Sequential(
nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10)
)
check_fn = lambda l: isinstance(l, nn.Linear)
apply_activation_checkpointing_wrapper(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
Args:
model (nn.Module):
The model whose submodules (or self) should be wrapped with activation checkpointing.
checkpoint_wrapper_fn (Optional[Callable[nn.Module]]):
A `Callable` which will wrap modules.
check_fn (Optional[Callable[nn.Module, nn.Module]]):
A lambda function which is passed the current layer and returns
``True`` or ``False`` depending on whether the input layer should be wrapped.
Returns: None (`model` is modified in place)
"""
return _recursive_wrap(
module=model,
auto_wrap_policy=partial(lambda_auto_wrap_policy, lambda_fn=check_fn),
wrapper_cls=checkpoint_wrapper_fn,
ignored_modules=set(),
ignored_params=set(),
only_wrap_children=True
) | ea4f7efef1f1c1c49a7cd078cae95be754f68c93 | 22,733 |
from sklearn.model_selection import train_test_split
def train_validation_split(x, y):
"""
Prepare validation data with proper size
Args:
x: (pandas.DataFrame) Feature set / Affecting features
y: (pandas.Dataframe) Target set / dependent feature
Returns:
x_train: (pandas.DataFrame) Feature set / Affecting features for training
y_train: (pandas.Dataframe) Target set / dependent feature for training
x_val: (pandas.DataFrame) Feature set / Affecting features for validation
y_val: (pandas.Dataframe) Target set / dependent feature for validation
"""
# For large datasets
if x.shape[0] > 100000:
val_ratio = 0.2
# For medium size datasets
elif x.shape[0] > 1000:
val_ratio = 0.15
# For small datasets
else:
val_ratio = 0.1
# Splitting dataset into train and validation
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=val_ratio, random_state=42)
print(f"Validation data prepared."
f" Train - Validation ratio taken {int(100 - val_ratio * 100)} % - {int(val_ratio * 100)} % .")
return x_train, y_train, x_val, y_val | eac70c13b2ebd592681dfdc5cc181b558a93b233 | 22,734 |
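A minimal usage sketch (hypothetical 100-row frame, so the 10% validation branch applies); assumes scikit-learn's train_test_split is importable as above:
import pandas as pd
X = pd.DataFrame({"a": range(100), "b": range(100)})
y = pd.DataFrame({"target": range(100)})
x_train, y_train, x_val, y_val = train_validation_split(X, y)
print(len(x_train), len(x_val))  # 90 10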
import numpy as np
from typing import Union
from typing import Tuple
from typing import List
def topk_accuracy(
rankings: np.ndarray, labels: np.ndarray, ks: Union[Tuple[int, ...], int] = (1, 5)
) -> List[float]:
"""Computes Top-K accuracies for different values of k
Args:
rankings: 2D rankings array: shape = (instance_count, label_count)
labels: 1D correct labels array: shape = (instance_count,)
ks: The k values in top-k, either an int or a list of ints.
Returns:
list of float: TOP-K accuracy for each k in ks
Raises:
ValueError
If the dimensionality of the rankings or labels is incorrect, or
if the length of rankings and labels aren't equal
"""
if isinstance(ks, int):
ks = (ks,)
_check_label_predictions_preconditions(rankings, labels)
# trim to max k to avoid extra computation
maxk = np.max(ks)
# compute true positives in the top-maxk predictions
tp = rankings[:, :maxk] == labels.reshape(-1, 1)
# trim to selected ks and compute accuracies
accuracies = [tp[:, :k].max(1).mean() for k in ks]
if any(np.isnan(accuracies)):
raise ValueError(f"NaN present in accuracies {accuracies}")
return accuracies | 36d3ac84b69b7d0f8764ad1213f56f82da717482 | 22,735 |
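A small worked example of the top-k logic above, replicating the core lines rather than calling the function (which relies on an internal precondition check); rankings and labels are made up so that top-1 = 1/3 and top-2 = 1.0:
import numpy as np
rankings = np.array([[2, 0, 1, 3],
                     [1, 3, 0, 2],
                     [0, 2, 3, 1]])
labels = np.array([2, 3, 2])
tp = rankings[:, :2] == labels.reshape(-1, 1)   # hits within the top-2 columns
print([tp[:, :k].max(1).mean() for k in (1, 2)])  # [0.333..., 1.0]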
def has_substr(line, chars):
""" checks to see if the line has one of the substrings given """
for char in chars:
if char in line:
return True
return False | cf438600894ca43c177af1661a95447daa8b6b0d | 22,736 |
def multifiltertestmethod(testmethod, strfilters):
"""returns a version of the testmethod that operates on filtered strings using strfilter"""
def filteredmethod(str1, str2):
return testmethod(multifilter(str1, strfilters), multifilter(str2, strfilters))
filteredmethod.__doc__ = testmethod.__doc__
filteredmethod.name = getattr(testmethod, 'name', testmethod.__name__)
return filteredmethod | eec5e580bcc2987f8abfc23dd118897ed5d2b4c4 | 22,737 |
import numpy as np
def getbasins(basin,Nx,Ny,Nz,S1,S2,S3):
"""
Args:
basin (numpy array): basin index array of shape (Nz, Ny, Nx)
Returns:
basins_1D (numpy array): flattened basin labels for the extended (S1 x S2 x S3) cell
Only Extend CHGCAR while mode is 'all'
"""
temp = np.zeros(Nx*Ny*Nz*S1*S2*S3)
basins = np.resize(temp,(Nz*S3,Ny*S2,Nx*S1))
block = np.resize(temp,(Nz*S3,Ny*S2,Nx*S1))
flag = 0
b = 1
teemp = []
for kss in range(Nz*S3):
for jss in range(Ny*S2):
for iss in range(Nx*S1):
flag += 1
if (flag == Nx*Ny*Nz+1):
b += 1
flag = 1
# print ('Nx:{:0} Ny:{:1} Nz:{:2} flagx:{:3} flagy:{:4} flagz:{:5}'.format(Nx,Ny,Nz,flagx,flagy,flagz))
block[kss,jss,iss] = b
basins[kss,jss,iss] = int(S1*S2*S3*(basin[kss%Nz,jss%Ny,iss%Nx]-1)) + b
basins_1D = np.resize(basins,Nx*Ny*Nz*S1*S2*S3)
# numindex = []
# numcount = [0,0,0,0,0,0,0,0]
# for i in basins_1D:
# numcount[int(i)-1] += 1
# print (numcount)
return basins_1D | 97c489a7453aced3624f6898896924647f387d55 | 22,738 |
def makeframefromhumanstring(s):
"""Create a frame from a human readable string
Strings have the form:
<request-id> <stream-id> <stream-flags> <type> <flags> <payload>
This can be used by user-facing applications and tests for creating
frames easily without having to type out a bunch of constants.
Request ID and stream IDs are integers.
Stream flags, frame type, and flags can be specified by integer or
named constant.
Flags can be delimited by `|` to bitwise OR them together.
If the payload begins with ``cbor:``, the following string will be
evaluated as Python literal and the resulting object will be fed into
a CBOR encoder. Otherwise, the payload is interpreted as a Python
byte string literal.
"""
fields = s.split(b' ', 5)
requestid, streamid, streamflags, frametype, frameflags, payload = fields
requestid = int(requestid)
streamid = int(streamid)
finalstreamflags = 0
for flag in streamflags.split(b'|'):
if flag in STREAM_FLAGS:
finalstreamflags |= STREAM_FLAGS[flag]
else:
finalstreamflags |= int(flag)
if frametype in FRAME_TYPES:
frametype = FRAME_TYPES[frametype]
else:
frametype = int(frametype)
finalflags = 0
validflags = FRAME_TYPE_FLAGS[frametype]
for flag in frameflags.split(b'|'):
if flag in validflags:
finalflags |= validflags[flag]
else:
finalflags |= int(flag)
if payload.startswith(b'cbor:'):
payload = b''.join(
cborutil.streamencode(stringutil.evalpythonliteral(payload[5:]))
)
else:
payload = stringutil.unescapestr(payload)
return makeframe(
requestid=requestid,
streamid=streamid,
streamflags=finalstreamflags,
typeid=frametype,
flags=finalflags,
payload=payload,
) | b588e61fb8b67b4160ac673eb3e70f373c7027b4 | 22,739 |
from nameparser import HumanName
def display_full_name_with_correct_capitalization(full_name):
"""
See documentation here: https://github.com/derek73/python-nameparser
:param full_name:
:return:
"""
full_name = full_name.strip()
full_name_parsed = HumanName(full_name)
full_name_parsed.capitalize()
full_name_capitalized = str(full_name_parsed)
return full_name_capitalized | 05133fc04631a39a19f2e27355456418ab7c78a7 | 22,740 |
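A brief usage sketch (requires the nameparser package; the expected output follows nameparser's documented capitalization example):
print(display_full_name_with_correct_capitalization("juan q. xavier velasquez y garcia iii"))
# -> Juan Q. Xavier Velasquez y Garcia III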
from typing import Optional
from typing import Iterable
from typing import Dict
from typing import Any
import collections
def load_experiment_artifacts(
src_dir: str, file_name: str, selected_idxs: Optional[Iterable[int]] = None
) -> Dict[int, Any]:
"""
Load all the files in dirs under `src_dir` that match `file_name`.
This function assumes subdirectories within `dst_dir` have the following
structure:
```
{dst_dir}/result_{idx}/{file_name}
```
where `idx` denotes an integer encoded in the subdirectory name.
The function returns the contents of the files, indexed by the integer extracted
from the subdirectory index name.
:param src_dir: directory containing subdirectories of experiment results
It is the directory that was specified as `--dst_dir` in `run_experiment.py`
and `run_notebook.py`
:param file_name: the file name within each run results subdirectory to load
E.g., `result_bundle.pkl`
:param selected_idxs: specific experiment indices to load
- `None` (default) loads all available indices
"""
artifact_tuples = yield_experiment_artifacts(
src_dir, file_name, selected_idxs
)
artifacts = collections.OrderedDict()
for key, artifact in artifact_tuples:
artifacts[key] = artifact
return artifacts | 67d8b9aba64f79b0361e7ed175ae597b22367f8a | 22,741 |
import datetime
from typing import Tuple
def get_market_metrics(market_portfolio: pd.DataFrame, t_costs: float, index_id: str, index_name: str,
test_data_start_date: datetime.date, test_data_end_date: datetime.date, market_logs=False) -> \
Tuple[pd.Series, pd.Series, pd.Series]:
"""
Get performance metrics for full equal-weighted market portfolio
:param market_logs: Write log data for market portfolio
:param test_data_start_date: Start date (with regard to test set)
:param test_data_end_date: End date (with regard to test set)
:param index_name: Index name
:param index_id: Index ID
:param t_costs: Transaction costs per half-turn
:param market_portfolio: DataFrame including full test set (market portfolio)
:return: Tuple of market portfolio metrics (Series) and cumulative returns series (Series)
"""
market_portfolio_metrics = pd.Series([]).rename('Market')
market_portfolio_metrics.index.name = 'Metrics'
excess_return_series = calc_excess_returns(
market_portfolio.loc[:, 'daily_return'].groupby(level=['datadate']).mean()).rename('daily_excess_return')
excess_return_series = excess_return_series.reset_index()
excess_return_series.loc[:, 'datadate'] = excess_return_series['datadate'].dt.strftime(
'%Y-%m-%d')
excess_return_series.set_index('datadate', inplace=True)
cumulative_excess_return = (excess_return_series.get('daily_excess_return') + 1).cumprod().rename(
'Cumulative Market Return')
cumulative_excess_return.index.name = 'Time'
# cumulative_return.plot(title='Cumulative Market Performance')
# plt.legend(loc='best')
# plt.show()
# JOB: Calculate metrics
# noinspection DuplicatedCode
annualized_sharpe = calc_sharpe(market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),
annualize=True)
annualized_sharpe_atc = calc_sharpe(
market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,
annualize=True)
annualized_sortino = calc_sortino(market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),
annualize=True)
annualized_sortino_atc = calc_sortino(
market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,
annualize=True)
mean_daily_return = market_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()
mean_daily_excess_return = calc_excess_returns(
market_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()
market_portfolio_metrics.loc['Mean Daily Return'] = mean_daily_return
market_portfolio_metrics.loc['Annualized Return'] = annualize_metric(mean_daily_return)
market_portfolio_metrics.loc['Mean Daily Excess Return'] = mean_daily_excess_return
market_portfolio_metrics.loc['Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)
market_portfolio_metrics.loc['Annualized Sharpe'] = annualized_sharpe
market_portfolio_metrics.loc['Annualized Sortino'] = annualized_sortino
# JOB: Add metrics incl. transaction costs of 5 bps per half-turn
market_portfolio_metrics.loc['Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs
market_portfolio_metrics.loc['Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)
market_portfolio_metrics.loc['Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs
market_portfolio_metrics.loc['Annualized Excess Return_atc'] = annualize_metric(
mean_daily_excess_return - 4 * t_costs)
market_portfolio_metrics.loc['Annualized Sharpe_atc'] = annualized_sharpe_atc
market_portfolio_metrics.loc['Annualized Sortino_atc'] = annualized_sortino_atc
data_record = {
'ID': config.run_id,
'Experiment Run End': datetime.datetime.now().isoformat(),
'Parent Model Type': 'Market',
'Model Type': 'Market',
'Index ID': index_id,
'Index Name': index_name,
'Study Period ID': config.study_period_id,
'Study Period Length': None,
'Period Range': None,
'Study Period Start Date': None,
'Study Period End Date': None,
'Test Set Size': None,
'Days Test Set': None,
'Constituent Number': None,
'Average Cross Section Size': None,
'Test Set Start Date': test_data_start_date.isoformat(),
'Test Set End Date': test_data_end_date.isoformat(),
'Total Accuracy': None,
'Top-k Accuracy Scores': None,
'Top-k Mean Daily Return': market_portfolio_metrics['Mean Daily Return'],
'Top-k Mean Daily Excess Return': market_portfolio_metrics['Mean Daily Excess Return'],
'Top-k Annualized Excess Return': market_portfolio_metrics['Annualized Excess Return'],
'Top-k Annualized Return': market_portfolio_metrics['Annualized Return'],
'Top-k Annualized Sharpe': market_portfolio_metrics['Annualized Sharpe'],
'Top-k Annualized Sortino': market_portfolio_metrics['Annualized Sortino'],
'Mean Daily Return (Short)': None,
'Mean Daily Return (Long)': None,
'Top-k Mean Daily Return_atc': market_portfolio_metrics['Mean Daily Return_atc'],
'Top-k Annualized Return_atc': market_portfolio_metrics['Annualized Return_atc'],
'Top-k Mean Daily Excess Return_atc': market_portfolio_metrics['Mean Daily Excess Return_atc'],
'Top-k Annualized Excess Return_atc': market_portfolio_metrics['Annualized Excess Return_atc'],
'Top-k Annualized Sharpe_atc': market_portfolio_metrics['Annualized Sharpe_atc'],
'Top-k Annualized Sortino_atc': market_portfolio_metrics['Annualized Sortino_atc'],
'Top-k Mean Daily Return (Short)_atc': None,
'Top-k Mean Daily Return (Long)_atc': None,
'Model Configs': None,
'Total Epochs': None,
'Return Series': excess_return_series['daily_excess_return'].to_dict(),
'Prediction Error': None
}
if market_logs:
write_to_logs(data_record)
return market_portfolio_metrics, excess_return_series, cumulative_excess_return | f5c77114c79ef901683ffdd4495a8a1022e42dc9 | 22,742 |
def get_puf_columns(seed=True, categorical=True, calculated=True):
"""Get a list of columns.
Args:
seed: Whether to include standard seed columns: ['MARS', 'XTOT', 'S006']
categorical: Whether to include categorical columns: ['F6251', 'MIDR', 'FDED', 'DSI']
calculated: Whether to include calculated columns: ['E00100', 'E09600']
Returns: List of columns.
"""
res = []
if seed:
res += SEED_COLS
if categorical:
res += CATEGORICAL_COLS
if calculated:
res += CALCULATED_COLS
return res | af7d6799b7f17b2b05b62a7ed88d0695784cde59 | 22,743 |
def list_all_vms(osvars):
"""Returns a listing of all VM objects as reported by Nova"""
novac = novaclient.Client('2',
osvars['OS_USERNAME'],
osvars['OS_PASSWORD'],
osvars['OS_TENANT_NAME'],
osvars['OS_AUTH_URL'],
service_type="compute")
return novac.servers.list(True, {'all_tenants': '1'}) | b03bc9c5e458d62403b86346bdb4f9a2c3081909 | 22,744 |
import subprocess
import sys
def exec_cmd(cmd, path):
""" Execute the specified command and return the result. """
out = ''
err = ''
sys.stdout.write("-------- Running \"%s\" in \"%s\"...\n" % (cmd, path))
parts = cmd.split()
try:
process = subprocess.Popen(parts, cwd=path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform == 'win32'))
out, err = process.communicate()
except IOError:
raise
except:
raise
return {'out': out, 'err': err} | ea75de47eebe526188533539a4d33e07d5a5bed3 | 22,745 |
from numbers import Number
def contains_numbers(iterable):
""" Check if first iterable item is a number. """
return isinstance(iterable[0], Number) | 0c6dc3031087e14ea50cb7d228da50b19a55a013 | 22,746 |
import tempfile
from pathlib import Path
import requests
def get_image(img: PathStr) -> PILImage:
"""Get picture from either a path or URL"""
if str(img).startswith("http"):
with tempfile.TemporaryDirectory() as tmpdirname:
dest = Path(tmpdirname) / str(img).split("?")[0].rpartition("/")[-1]
# NOTE: to be replaced by download(url, dest=dest) [from unpackai.utils]
with requests.get(str(img)) as resp:
resp.raise_for_status()
dest.write_bytes(resp.content)
return PILImage.create(dest)
else:
return PILImage.create(img) | 374e2ff8f97c4d63ceb3d621ced25451d10b6793 | 22,747 |
def FAIMSNETNN_model(train_df, train_y, val_df, val_y, model_args, cv=3):
"""FIT neuralnetwork model."""
input_dim = train_df.shape[1]
if model_args["grid"] == "tiny":
param_grid = {"n1": [100], "d1": [0.3, 0.1], "lr": [0.001, 0.01], "epochs": [50],
"batch_size": [32, 128], "input_dim": [input_dim]}
else:
param_grid = {"n1": [100, 200, 500], "d1": [0.5, 0.3, 0.1],
"lr": [0.0001, 0.001, 0.01], "epochs": [50],
"batch_size": [32, 64, 128], "input_dim": [input_dim]}
model = keras.wrappers.scikit_learn.KerasRegressor(build_fn=create_model, verbose=0)
gs = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=model_args["jobs"], cv=cv,
return_train_score=True, verbose=2)
gsresults = gs.fit(train_df, train_y)
# history = model.fit(train_df, train_y, validation_split=0.1, epochs=200, batch_size=16)
print(gs.best_params_)
gs.best_params_["epochs"] = 100
model = create_model(**gs.best_params_)
history = model.fit(train_df, train_y, validation_split=0.1, epochs=gs.best_params_["epochs"],
batch_size=gs.best_params_["batch_size"])
df_results, cv_res = format_summary(train_df, val_df, train_y, val_y, model, "FNN", gsresults)
cv_res["params"] = str(gs.best_params_)
return df_results, cv_res, gs, model | 6f6520a3f9a746e6e1241f638358f00ce4f20ede | 22,748 |
import numpy as np
def assign_exam_blocks(data, departments, splitted_departments, number_exam_days):
"""
Assign departments to exam blocks and optimize this schedule to reduce conflicts.
data (pandas.DataFrame): Course enrollments data
departments (dict): Departments (str key) and courses in departments (list value)
splitted_departments (iterable): Departments that were split in two, stored under "<name>-1" and "<name>-2" keys in departments
number_exam_days (int): The number of days for exams
"""
# create two exam blocks per day
exam_blocks = [[] for i in range(2*number_exam_days)]
# sequentially fill exam_blocks with departments in random order
i = 0
department_list = list(departments)
index = np.random.permutation(np.arange(len(department_list)))
for j in range(len(department_list)):
department = department_list[index[j]]
exam_blocks[i%(2*number_exam_days)].append(department)
i += 1
# swap exam blocks until this swap method can no longer reduce conflicts
total_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
conflicts_reduced = True
while conflicts_reduced:
conflicts_reduced = False
# do swapping between departments in exam block i and exam block j
for i in range(len(exam_blocks)-1):
for j in range(i+1, len(exam_blocks)):
do_swapping(data, departments, exam_blocks[i], exam_blocks[j])
# do swapping between the two blocks of split departments to try to minimize
# conflicts on a course basis
course_conflicts_reduced = True
while course_conflicts_reduced:
current_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
course_conflicts_reduced = False
# do this for every splitted department
for department in splitted_departments:
courses = [departments[department+"-1"], departments[department+"-2"]]
# this allows us to swap from first department exam block to other department exam block
for course_index in (0, 1):
# swap any course from one department section to the other if it reduces conflicts
i = 0
while i < len(courses[course_index]):
courses[~course_index].append(courses[course_index].pop(i))
tmp_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
if tmp_conflicts >= current_conflicts:
courses[course_index].insert(i, courses[~course_index].pop())
else:
course_conflicts_reduced = True
i += 1
current_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
if current_conflicts < total_conflicts:
total_conflicts = current_conflicts
conflicts_reduced = True
return exam_blocks | ae272ea00a277960497d96b2593371bc35a9c3cb | 22,749 |
def aq_name(path_to_shp_file):
"""
Computes the name of a given aquifer given it's shape file
:param path_to_shp_file: path to the .shp file for the given aquifer
:return: a string (name of the aquifer)
"""
str_ags = path_to_shp_file.split('/')
str_aq = ""
if len(str_ags) >= 2:
str_aq = str(str_ags[1])
print(str_aq)
return str_aq | 1cb6f9881383b4627ea4f78bf2f6fd9cdf97dbc4 | 22,751 |
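For example, with a hypothetical path the second path component is printed and returned:
print(aq_name("aquifers/Ogallala/boundary.shp"))  # "Ogallala"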
import numpy as np
def gini(arr, mode='all'):
"""Calculate the Gini coefficient(s) of a matrix or vector.
Parameters
----------
arr : array-like
Array or matrix on which to compute the Gini coefficient(s).
mode : string, optional
One of ['row-wise', 'col-wise', 'all']. Default is 'all'.
Returns
-------
coeffs : array-like
Array of Gini coefficients.
Note
----
If arr is a transition matrix A, such that Aij = P(S_k=j|S_{k-1}=i),
then 'row-wise' is equivalent to 'tmat_departure' and 'col-wise' is
equivalent to 'tmat_arrival'.
Similarly, if arr is the observation (lambda) matrix of an HMM such that
lambda \in \mathcal{C}^{n_states \times n_units}, then 'row-wise' is
equivalent to 'lambda_across_units' and 'col-wise' is equivalent to
'lambda_across_units'.
If mode = 'all', then the matrix is unwrapped into a numel-dimensional
array before computing the Gini coefficient.
"""
if mode is None:
mode = 'row-wise'
if mode not in ['row-wise', 'col-wise', 'all']:
raise ValueError("mode '{}' not supported!".format(mode))
gini_coeffs = None
if mode=='all':
arr = np.atleast_1d(arr).astype(float)
gini_coeffs = _gini(arr)
elif mode=='row-wise':
arr = np.atleast_2d(arr).astype(float)
gini_coeffs = []
for row in arr:
gini_coeffs.append(_gini(row))
elif mode=='col-wise':
arr = np.atleast_2d(arr).astype(float)
gini_coeffs = []
for row in arr.T:
gini_coeffs.append(_gini(row))
return gini_coeffs | 9fb3116506db949d273000510724bcce0ed165e2 | 22,752 |
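The per-vector helper `_gini` is not shown above; a minimal sketch of a standard Gini implementation it might correspond to (an assumption, not the original helper):
import numpy as np
def _gini(x):
    """Gini coefficient of a 1D non-negative array (cumulative-share formula)."""
    x = np.sort(np.asarray(x, dtype=float))  # sort ascending
    n = x.size
    if n == 0 or x.sum() == 0:
        return 0.0
    cum = np.cumsum(x)
    return (n + 1 - 2 * np.sum(cum) / cum[-1]) / n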
import requests
def getIndex():
"""
Retrieves index value.
"""
headers = {
'accept': 'application/json',
}
indexData = requests.get(
APIUrls.lnapi+APIUrls.indexUrl,
headers=headers,
)
if indexData.status_code == 200:
return indexData.json()
else:
raise RuntimeError(
'Unable to fetch index data:\n'
f'{indexData.text}'
) | ba35e1573a62e76d1f413036761b8a9054a3a878 | 22,753 |
def input_as_string(filename:str) -> str:
"""returns the content of the input file as a string"""
with open(filename, encoding="utf-8") as file:
return file.read().rstrip("\n") | 0343de48580a71a62895aa093af1213c3f0c0b84 | 22,755 |
def channame_to_python_format_string(node, succgen=None):
"""See channame_str_to_python_format_string
@succgen is optional, if given will check that identifiers can be found.
"""
if not node: #empty AST
return (True, "")
if node.type == 'Identifier': # and len(node.children) >= 1:
#if no succgen, assume its a channel
if not succgen or node.children[0] in succgen.channel_identifiers:
#Of the form "channame[x][x]...[x]"
static = True
if node.leaf: #Have IndexList?
idxs = []
for c in node.leaf.children:
assert c.type == 'Index'
(exprstatic, expr) = _expression_to_python_format_string(c.leaf, succgen)
static = static and exprstatic
idxs += [expr]
idxs = "".join(["[" + x + "]" for x in idxs])
return (static, node.children[0] + idxs)
else:
return (True, node.children[0])
else:
print(node, succgen.channel_identifiers)
raise IllegalExpressionException('Unknown channel ' +
node.children[0])
else:
raise IllegalExpressionException('Illegal expression type for channame: ' +
node.type) | f7e71bd49624657e98e6f2c172e6f02d8bfc7307 | 22,756 |
import numpy
import pandas
def is_bad(x):
""" for numeric vector x, return logical vector of positions that are null, NaN, infinite"""
if can_convert_v_to_numeric(x):
x = safe_to_numeric_array(x)
return numpy.logical_or(
pandas.isnull(x), numpy.logical_or(numpy.isnan(x), numpy.isinf(x))
)
return pandas.isnull(x) | b4cf9de18cd8e52ff90a801f1eccf6a4ee2500db | 22,757 |
def T0_T0star(M, gamma):
"""Total temperature ratio for flow with heat addition (eq. 3.89)
:param <float> M: Initial Mach #
:param <float> gamma: Specific heat ratio
:return <float> Total temperature ratio T0/T0star
"""
t1 = (gamma + 1) * M ** 2
t2 = (1.0 + gamma * M ** 2) ** 2
t3 = 2.0 + (gamma - 1.0) * M ** 2
return t1 / t2 * t3 | 2e5c8ec2ab24dd0d4dfa2feddd0053f277665b33 | 22,760 |
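Written out, the ratio computed above (Eq. 3.89 for flow with heat addition) is:
\frac{T_0}{T_0^*} = \frac{(\gamma + 1)\,M^2}{\left(1 + \gamma M^2\right)^2}\left[\,2 + (\gamma - 1)M^2\,\right]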
from typing import Optional
def remount_as(
ip: Optional[str] = None, writeable: bool = False, folder: str = "/system"
) -> bool:
"""
Mount/Remount file-system. Requires root
:param folder: folder to mount
:param writeable: mount as writeable or readable-only
:param ip: device ip
:rtype: true on success
"""
if writeable:
return (
shell(f"mount -o rw,remount {folder}", ip=ip).code
== ADBCommandResult.RESULT_OK
)
else:
return (
shell(f"mount -o ro,remount {folder}", ip=ip).code
== ADBCommandResult.RESULT_OK
) | 8f343f96d066543359bdfcea3c42f41f40dcaf4d | 22,761 |
def flip_channels(img):
"""Flips the order of channels in an image; eg, BGR <-> RGB.
This function assumes the image is a numpy.array (what's returned by cv2
function calls) and uses the numpy re-ordering methods. The number of
channels does not matter.
If the image array is strictly 2D, no re-ordering is possible and the
original data is returned untouched.
"""
if len(img.shape) == 2:
return img;
return img[:,:,::-1] | 7aab0222f6fd66c06f8464cd042f30c6eac01c72 | 22,762 |
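A quick illustrative sketch with a single made-up BGR pixel:
import numpy as np
bgr = np.array([[[255, 128, 0]]], dtype=np.uint8)  # one pixel: B=255, G=128, R=0
print(flip_channels(bgr)[0, 0])                    # [  0 128 255]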
def parse_main(index):
"""Parse a main function containing block items.
Ex: int main() { return 4; }
"""
err = "expected main function starting"
index = match_token(index, token_kinds.int_kw, ParserError.AT, err)
index = match_token(index, token_kinds.main, ParserError.AT, err)
index = match_token(index, token_kinds.open_paren, ParserError.AT, err)
index = match_token(index, token_kinds.close_paren, ParserError.AT, err)
node, index = parse_compound_statement(index)
return nodes.Main(node), index | ab932cf3d99340b97ec7d32fa668c4e00e16a3d1 | 22,764 |
def elist2tensor(elist, idtype):
"""Function to convert an edge list to edge tensors.
Parameters
----------
elist : iterable of int pairs
List of (src, dst) node ID pairs.
idtype : int32, int64, optional
Integer ID type. Must be int32 or int64.
Returns
-------
(Tensor, Tensor)
Edge tensors.
"""
if len(elist) == 0:
u, v = [], []
else:
u, v = zip(*elist)
u = list(u)
v = list(v)
return F.tensor(u, idtype), F.tensor(v, idtype) | a38c26a13b2fc7f111e3ec2c036e592b5b4c3c70 | 22,765 |
import datetime
def _term_to_xapian_value(term, field_type):
"""
Converts a term to a serialized
Xapian value based on the field_type.
"""
assert field_type in FIELD_TYPES
def strf(dt):
"""
Equivalent to datetime.datetime.strftime(dt, DATETIME_FORMAT)
but accepts years below 1900 (see http://stackoverflow.com/q/10263956/931303)
"""
return '%04d%02d%02d%02d%02d%02d' % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
if field_type == 'boolean':
assert isinstance(term, bool)
if term:
value = 't'
else:
value = 'f'
elif field_type == 'integer':
value = INTEGER_FORMAT % term
elif field_type == 'float':
value = xapian.sortable_serialise(term)
elif field_type == 'date' or field_type == 'datetime':
if field_type == 'date':
# http://stackoverflow.com/a/1937636/931303 and comments
term = datetime.datetime.combine(term, datetime.time())
value = strf(term)
else: # field_type == 'text'
value = _to_xapian_term(term)
return value | 8fe2926a7093ff9a7b22cc222c4a3c5c8f6bc155 | 22,766 |
def pop_stl1(osurls, radiourls, splitos):
"""
Replace STL100-1 links in 10.3.3+.
:param osurls: List of OS platforms.
:type osurls: list(str)
:param radiourls: List of radio platforms.
:type radiourls: list(str)
:param splitos: OS version, split and cast to int: [10, 3, 3, 2205]
:type splitos: list(int)
"""
if newer_103(splitos, 3):
osurls = osurls[1:]
radiourls = radiourls[1:]
return osurls, radiourls | d88576028bbfbf61ab6fec517e7a66d731b4ebf3 | 22,767 |
from flask import jsonify
def empty_search():
"""
:return: json response of empty list, meaning empty search result
"""
return jsonify(results=[]) | 59ac0a6d3b9a3d17f7a80e633ea4bb5b2d07ca33 | 22,768 |
import numpy as np
import rasterio
import rasterio.mask
def clip_raster_mean(raster_path, feature, var_nam):
""" Opens a raster file from raster_path and applies a mask based on
a polygon (feature), then returns the mean of the pixel values
contained in the mask (ignoring NaNs).
:param raster_path: raster path
:param feature: polygon feature (extracted from a shapefile or geojson)
:param var_nam: variable name; when 'PTED', negative pixel values are
set to NaN before averaging
:return: mean of the pixel values contained in the mask
"""
with rasterio.open(raster_path) as src:
# Apply mask to raster and crop
out_image, out_transform = rasterio.mask.mask(src,
[feature["geometry"]], crop=True)
if var_nam == 'PTED':
out_image[out_image < 0] = np.nan
return np.nanmean(out_image) | 82244272c2da713f679d0f56f5736810fcf8649c | 22,769 |
import json
def load_data(in_file):
"""load json file from seqcluster cluster"""
with open(in_file) as in_handle:
return json.load(in_handle) | 93c1766cb1e36410a8c67e2291b93aa7280abd63 | 22,770 |
import re
def expand_at_linestart(P, tablen):
"""只扩展行开头的制表符号"""
def exp(m):
return m.group().expandtabs(tablen)
return ''.join([ re.sub(r'^\s+', exp, s) for s in P.splitlines(True) ]) | 2b8310e89efdba54b121667e11454281e2c214e3 | 22,772 |
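For example (tab width 4): only the leading tab is expanded, the inner tab is left alone:
print(repr(expand_at_linestart("\tindented\tinner tab\n", 4)))
# '    indented\tinner tab\n'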
def configs():
"""Create a mock Configuration object with sentinel values
Eg.
Configuration(
base_jar=sentinel.base_jar,
config_file=sentinel.config_file,
...
)
"""
return Configuration(**dict(
(k, getattr(sentinel, k))
for k in DEFAULTS._asdict().keys()
)) | c8aa44e1c9695a8fe0188d739c87be07ab06bdb0 | 22,773 |
def svn_fs_revision_root_revision(root):
"""svn_fs_revision_root_revision(svn_fs_root_t * root) -> svn_revnum_t"""
return _fs.svn_fs_revision_root_revision(root) | ce153da9527fb8b1235f5591dbd68e2f1c1ecab2 | 22,774 |
import numpy as np
from typing import Any
def is_floatscalar(x: Any) -> bool:
"""Check whether `x` is a float scalar.
Parameters:
----------
x: A python object to check.
Returns:
----------
`True` iff `x` is a float scalar (built-in or Numpy float).
"""
return isinstance(x, (
float,
np.float16,
np.float32,
np.float64,
)) | 2a93524290eaa4b4e1f0b0cc7a8a0dcb2a46f9d3 | 22,776 |
def http_header_control_cache(request):
""" Cache control type
request: object whose headers are inspected (e.g. an HTTP response)"""
print("--------------- Getting cache control -------------------")
try:
cabecera = request.headers
cache_control = cabecera.get("cache-control")
except Exception:
cache_control = "NA"
print("Unexpected error: no cache-control header found in %s" % (request,))
return cache_control
def summation(limit):
"""
Returns the summation of all natural numbers from 0 to limit
Uses the closed-form formula for the sum of the first `limit` natural numbers
:param limit: {int}
:return: {int}
"""
return (limit * (limit + 1)) // 2 if limit >= 0 else 0 | 1ff16c7c4131458e50c9c9bd5c0f20895d8ab121 | 22,778 |
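The closed form being used, for a non-negative integer limit n (integer division is exact because n(n+1) is always even):
\sum_{k=0}^{n} k = \frac{n(n+1)}{2}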
import tensorflow as tf
import tensorflow_hub as hub
def load_and_initialize_hub_module(module_path, signature='default'):
"""Loads graph of a TF-Hub module and initializes it into a session.
Args:
module_path: string Path to TF-Hub module.
signature: string Signature to use when creating the apply graph.
Return:
graph: tf.Graph Graph of the module.
session: tf.Session Session with initialized variables and tables.
inputs: dict Dictionary of input tensors.
outputs: dict Dictionary of output tensors.
Raises:
ValueError: If signature contains a SparseTensor on input or output.
"""
graph = tf.Graph()
with graph.as_default():
tf.compat.v1.logging.info('Importing %s', module_path)
module = hub.Module(module_path)
signature_inputs = module.get_input_info_dict(signature)
signature_outputs = module.get_output_info_dict(signature)
# First check there are no SparseTensors in input or output.
for key, info in list(signature_inputs.items()) + list(
signature_outputs.items()):
if info.is_sparse:
raise ValueError(
'Signature "%s" has a SparseTensor on input/output "%s".'
' SparseTensors are not supported.' % (signature, key))
# Create placeholders to represent the input of the provided signature.
inputs = {}
for input_key, input_info in signature_inputs.items():
inputs[input_key] = tf.compat.v1.placeholder(
shape=input_info.get_shape(), dtype=input_info.dtype, name=input_key)
outputs = module(inputs=inputs, signature=signature, as_dict=True)
session = tf.compat.v1.Session(graph=graph)
session.run(tf.compat.v1.global_variables_initializer())
session.run(tf.compat.v1.tables_initializer())
return graph, session, inputs, outputs | b04b5f77c7e0207d314ebb5910ec1c5e61f4755c | 22,779 |
import numpy as np
def j_index(true_labels, predicts):
""" j_index
Computes the Jaccard Index of the given set, which is also called the
'intersection over union' in multi-label settings. It's defined as the
intersection between the true label's set and the prediction's set,
divided by the sum, or union, of those two sets.
Parameters
----------
true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the true labels for all the classification tasks and for
n_samples.
predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the predictions for all the classification tasks and for
n_samples.
Returns
-------
float
The J-index, or 'intersection over union', for the given sets.
Examples
--------
>>> from skmultiflow.evaluation.metrics.metrics import j_index
>>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
>>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
>>> j_index(true_labels, predictions)
0.66666666666666663
"""
if not hasattr(true_labels, 'shape'):
true_labels = np.asarray(true_labels)
if not hasattr(predicts, 'shape'):
predicts = np.asarray(predicts)
N, L = true_labels.shape
s = 0.0
for i in range(N):
inter = sum((true_labels[i, :] * predicts[i, :]) > 0) * 1.
union = sum((true_labels[i, :] + predicts[i, :]) > 0) * 1.
if union > 0:
s += inter / union
elif np.sum(true_labels[i, :]) == 0:
s += 1.
return s * 1. / N | 33bef64196acf441c299a4a90da64b2bb866e364 | 22,780 |
import torch
from torch.nn.functional import pad
def odefun(x, t, net, alph=[1.0,1.0,1.0]):
"""
neural ODE combining the characteristics and log-determinant (see Eq. (2)), the transport costs (see Eq. (5)), and
the HJB regularizer (see Eq. (7)).
d_t [x ; l ; v ; r] = odefun( [x ; l ; v ; r] , t )
x - particle position
l - log determinant
v - accumulated transport costs (Lagrangian)
r - accumulates violation of HJB condition along trajectory
"""
nex, d_extra = x.shape
d = d_extra - 3
z = pad(x[:, :d], (0, 1, 0, 0), value=t) # concatenate with the time t
gradPhi, trH = net.trHess(z)
dx = -(1.0/alph[0]) * gradPhi[:,0:d]
dl = -(1.0/alph[0]) * trH.unsqueeze(1)
dv = 0.5 * torch.sum(torch.pow(dx, 2) , 1 ,keepdims=True)
dr = torch.abs( -gradPhi[:,-1].unsqueeze(1) + alph[0] * dv )
return torch.cat( (dx,dl,dv,dr) , 1 ) | 1523b28f1568bcd668a3f8cc8ce39dfb7d8096fe | 22,781 |
def create_transform(num_flow_steps,
param_dim,
context_dim,
base_transform_kwargs):
"""Build a sequence of NSF transforms, which maps parameters x into the
base distribution u (noise). Transforms are conditioned on strain data y.
Note that the forward map is f^{-1}(x, y).
Each step in the sequence consists of
* A linear transform of x, which in particular permutes components
* A NSF transform of x, conditioned on y.
There is one final linear transform at the end.
This function was adapted from the uci.py example in
https://github.com/bayesiains/nsf
Arguments:
num_flow_steps {int} -- number of transforms in sequence
param_dim {int} -- dimensionality of x
context_dim {int} -- dimensionality of y
base_transform_kwargs {dict} -- hyperparameters for NSF step
Returns:
Transform -- the constructed transform
"""
transform = transforms.CompositeTransform([
transforms.CompositeTransform([
create_linear_transform(param_dim),
create_base_transform(i, param_dim, context_dim=context_dim,
**base_transform_kwargs)
]) for i in range(num_flow_steps)
] + [
create_linear_transform(param_dim)
])
return transform | d4d556163af777f50aed2f4d86b1ae9c1de81047 | 22,782 |
def for_in_pyiter(it):
"""
>>> for_in_pyiter(Iterable(5))
[0, 1, 2, 3, 4]
"""
l = []
for item in it:
l.append(item)
return l | 7d5c44ce771ea9847d57749235a31f200a01b67f | 22,784 |
from sklearn.model_selection import train_test_split
def train_test_split_with_none(X, y=None, sample_weight=None, random_state=0):
"""
Splits into train and test data even if they are None.
@param X X
@param y y
@param sample_weight sample weight
@param random_state random state
@return similar to :epkg:`scikit-learn:model_selection:train_test_split`.
"""
not_none = [_ for _ in [X, y, sample_weight] if _ is not None]
res = train_test_split(*not_none, random_state=random_state)
inc = len(not_none)
trains = []
tests = []
for i in range(inc):
trains.append(res[i * 2])
tests.append(res[i * 2 + 1])
while len(trains) < 3:
trains.append(None)
tests.append(None)
X_train, y_train, w_train = trains
X_test, y_test, w_test = tests
return X_train, y_train, w_train, X_test, y_test, w_test | 8a789d6001a56096eba556301e130c57edd8cf87 | 22,785 |
from timeit import default_timer as timer
def measure_time(func, repeat=1000):
"""
Repeatedly executes a function
and records lowest time.
"""
def wrapper(*args, **kwargs):
min_time = 1000
for _ in range(repeat):
start = timer()
result = func(*args, **kwargs)
curr_time = timer() - start
if curr_time < min_time:
min_time = curr_time
return [min_time, result]
return wrapper | 0515eca9cfa96a7395b3461bd3302a9780d05366 | 22,786 |
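A usage sketch as a decorator (hypothetical workload; assumes `timer` is `timeit.default_timer` as imported above):
@measure_time
def build_squares(n):
    return [i * i for i in range(n)]

best, squares = build_squares(1000)
print(f"best of 1000 runs: {best:.6f}s, result length: {len(squares)}")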
import numpy as np
def initialise_players(frame_data, params):
"""
initialise_players(frame_data, params)
create a list of player objects that holds their positions and velocities from the tracking data dataframe
Parameters
-----------
frame_data: row (i.e. instant) of either the home or away team tracking Dataframe
params: Dictionary of model parameters (default model parameters can be generated using default_model_params() )
Returns
-----------
team_players: list of player objects for the team at at given instant
"""
# get player ids
player_ids = np.unique([x.split("_")[0] for x in frame_data.keys()])
# create list
team_players = []
for p in player_ids:
# create a player object for player_id 'p'
team_player = player(p, frame_data, params)
if team_player.inframe:
team_players.append(team_player)
return team_players | 4126ba5cf1cdcd61017692260026dbdd03523874 | 22,787 |
import gc
import numpy as np
import pandas as pd
def read_edgelist(f, directed=True, sep=r"\s+", header=None, keep_default_na=False, **readcsvkwargs):
"""
Creates a csrgraph from an edgelist.
The edgelist should be in the form
[source destination]
or
[source destination edge_weight]
The first column needs to be the source, the second the destination.
If there is a third column it's assumed to be edge weights.
Otherwise, all arguments from pandas.read_csv can be used to read the file.
f : str
Filename to read
directed : bool
Whether the graph is directed or undirected.
All csrgraphs are directed, undirected graphs simply add "return edges"
sep : str
CSV-style separator. Eg. Use "," if comma separated
header : int or None
pandas read_csv parameter. Use if column names are present
keep_default_na: bool
pandas read_csv argument to prevent casting any value to NaN
read_csv_kwargs : keyword arguments for pd.read_csv
Pass these kwargs as you would normally to pd.read_csv.
Returns : csrgraph
"""
# Read in csv correctly to each column
elist = pd.read_csv(
f, sep=sep, header=header,
keep_default_na=keep_default_na, **readcsvkwargs
)
if len(elist.columns) == 2:
elist.columns = ['src', 'dst']
elist['weight'] = np.ones(elist.shape[0])
elif len(elist.columns) == 3:
elist.columns = ['src', 'dst', 'weight']
else:
raise ValueError(f"""
Invalid columns: {elist.columns}
Expected 2 (source, destination)
or 3 (source, destination, weight)
Read File: \n{elist.head(5)}
""")
# Create name mapping to normalize node IDs
# Somehow this is 1.5x faster than np.union1d. Shame on numpy.
allnodes = list(
set(elist.src.unique())
.union(set(elist.dst.unique())))
# Factor all nodes to unique IDs
names = (
pd.Series(allnodes).astype('category')
.cat.categories
)
nnodes = names.shape[0]
# Get the input data type
if nnodes > UINT16_MAX:
dtype = np.uint32
if nnodes > UINT32_MAX:
dtype = np.uint64
else:
dtype = np.uint16
name_dict = dict(zip(names,
np.arange(names.shape[0], dtype=dtype)))
elist.src = elist.src.map(name_dict).astype(dtype)
elist.dst = elist.dst.map(name_dict).astype(dtype)
# clean up temp data
allnodes = None
name_dict = None
gc.collect()
# If undirected graph, append edgelist to reversed self
if not directed:
other_df = elist.copy()
other_df.columns = ['dst', 'src', 'weight']
elist = pd.concat([elist, other_df])
other_df = None
gc.collect()
# Need to sort by src for _edgelist_to_graph
elist = elist.sort_values(by='src')
# extract numpy arrays and clear memory
src = elist.src.to_numpy()
dst = elist.dst.to_numpy()
weight = elist.weight.to_numpy()
elist = None
gc.collect()
G = methods._edgelist_to_graph(
src, dst, weight,
nnodes, nodenames=names
)
return G | dd4110700857c3deb86c53a176ab93b0366cd900 | 22,788 |
def is_comment(txt_row):
""" Tries to determine if the current line of text is a comment line.
Args:
txt_row (string): text line to check.
Returns:
True when the text line is considered a comment line, False if not.
"""
if (len(txt_row) < 1):
return True
if ((txt_row[0] == '(') and (txt_row[len(txt_row) - 1] == ')')):
return True
else:
return False | db54b90053244b17ec209ed1edb1905b62151165 | 22,789 |
import json
def updateBillingPlanPaymentDefinition(pk, paypal_payment_definition):
"""Update an existing payment definition of a billing plan
:param pk: the primary key of the payment definition (associated with a billing plan)
:type pk: integer
:param paypal_payment_definition: Paypal billing plan payment definition
:type paypal_payment_definition: object
:returns: True for successful update or False in any other case
:rtype: bool
"""
try:
try:
frequency_interval = paypal_payment_definition['frequency_interval']
except:
frequency_interval = None
try:
cycles = paypal_payment_definition['cycles']
except:
cycles = None
try:
charge_models = paypal_payment_definition['charge_models']
except:
charge_models = dict()
try:
amount_value = paypal_payment_definition['amount']['value']
except:
amount_value = None
try:
amount_currency = paypal_payment_definition['amount']['currency']
except:
amount_currency = None
BillingPlanPaymentDefinition.objects.filter(pk=pk).update(
name=paypal_payment_definition['name'],
type=paypal_payment_definition['type'],
frequency=paypal_payment_definition['frequency'],
frequency_interval=frequency_interval,
cycles=cycles,
charge_models=json.dumps(utilities.object2dict(charge_models, False)),
amount_value=amount_value,
amount_currency=amount_currency,
json=json.dumps(utilities.object2dict(paypal_payment_definition, False))
)
return True
except Exception as ex:
log.error("Error in billing plan's payment definition modification (pk:=%d): %s" % (pk, str(ex)) )
return False | b4bf58088c8e501ccf380dda98587467a8683ff9 | 22,790 |
from typing import List
def format_float_list(array: List[float], precision: int = 4) -> List[str]:
"""
Formats a list of float values to a specific precision.
:param array: A list of float values to format.
:param precision: The number of decimal places to use.
:return: A list of strings containing the formatted floats.
"""
return [format_float(f, precision) for f in array] | b790379327acc5ebdf54f99621a06edd6228941d | 22,791 |
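The `format_float` helper is not shown in this snippet; a minimal sketch of what it is assumed to do (an assumption, not the original helper):
def format_float(value: float, precision: int = 4) -> str:
    # Fixed-point formatting to `precision` decimal places (assumed behaviour).
    return f"{value:.{precision}f}"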
from dash import html
def counts_card() -> html.Div:
"""Return the div that contains the overall count of patients/studies/images."""
return html.Div(
className="row",
children=[
html.Div(
className="four columns",
children=[
html.Div(
className="card gold-left-border",
children=html.Div(
className="container",
children=[
html.H4(id="patient-count", children=""),
html.P(children="patients"),
],
),
)
],
),
html.Div(
className="four columns",
children=[
html.Div(
className="card green-left-border",
children=html.Div(
className="container",
children=[
html.H4(id="study-count", children=""),
html.P(children="studies"),
],
),
)
],
),
html.Div(
className="four columns",
children=[
html.Div(
className="card purple-left-border",
children=html.Div(
className="container",
children=[
html.H4(id="image-count", children=""),
html.P(children="images"),
],
),
)
],
),
],
) | f80ba28b7ef1b2407d6a8b3e8eaccf26c734566a | 22,792 |
import logging
def validate_est(est: EstData, include_elster_responses: bool = False):
"""
Data for a Est is validated using ERiC. If the validation is successful then this should return
a 200 HTTP response with {'success': bool, 'est': est}. Otherwise this should return a 400 response if the
validation failed with {'code': int, 'message': str, 'description': str, 'validation_problems': [{'code': int,
'message': str}]} or a 400 response for other client errors and a 500 response for server errors with {'code':
int, 'message': str, 'description': str}
:param est: the JSON input data for the ESt
:param include_elster_responses: query parameter which indicates whether the ERiC/Server response are returned
"""
try:
request = EstValidationRequestController(est, include_elster_responses)
result = request.process()
if "transferticket" in result:
result["transfer_ticket"] = result.pop("transferticket")
return result
except EricProcessNotSuccessful as e:
logging.getLogger().info("Could not validate est", exc_info=True)
raise HTTPException(status_code=422, detail=e.generate_error_response(include_elster_responses)) | 2565572efd9b1ee52fabb98473b7934e13b691ca | 22,793 |
from operator import attrgetter
from typing import Callable
from typing import List
async def list_solver_releases(
solver_key: SolverKeyId,
user_id: int = Depends(get_current_user_id),
catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)),
url_for: Callable = Depends(get_reverse_url_mapper),
):
""" Lists all releases of a given solver """
releases: List[Solver] = await catalog_client.list_solver_releases(
user_id, solver_key
)
for solver in releases:
solver.url = url_for(
"get_solver_release", solver_key=solver.id, version=solver.version
)
return sorted(releases, key=attrgetter("pep404_version")) | 0461e6ba72c01e789af8571d75b7da21d2f17801 | 22,794 |
import copy
import matplotlib.pyplot as plt
from shapely.geometry import LinearRing
def offset_perimeter(geometry, offset, side='left', plot_offset=False):
"""Offsets the perimeter of a geometry of a :class:`~sectionproperties.pre.sections.Geometry`
object by a certain distance. Note that the perimeter facet list must be entered in a
consecutive order.
:param geometry: Cross-section geometry object
:type geometry: :class:`~sectionproperties.pre.sections.Geometry`
:param float offset: Offset distance for the perimeter
:param string side: Side of the perimeter offset, either 'left' or 'right'. E.g. 'left' for a
counter-clockwise offsets the perimeter inwards.
:param bool plot_offset: If set to True, generates a plot comparing the old and new geometry
The following example 'corrodes' a 200UB25 I-section by 1.5 mm and compares a few of the
section properties::
import sectionproperties.pre.sections as sections
from sectionproperties.pre.offset import offset_perimeter
from sectionproperties.analysis.cross_section import CrossSection
# calculate original section properties
original_geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=16)
original_mesh = original_geometry.create_mesh(mesh_sizes=[3.0])
original_section = CrossSection(original_geometry, original_mesh)
original_section.calculate_geometric_properties()
original_area = original_section.get_area()
(original_ixx, _, _) = original_section.get_ic()
# calculate corroded section properties
corroded_geometry = offset_perimeter(original_geometry, 1.5, plot_offset=True)
corroded_mesh = corroded_geometry.create_mesh(mesh_sizes=[3.0])
corroded_section = CrossSection(corroded_geometry, corroded_mesh)
corroded_section.calculate_geometric_properties()
corroded_area = corroded_section.get_area()
(corroded_ixx, _, _) = corroded_section.get_ic()
# compare section properties
print("Area reduction = {0:.2f}%".format(
100 * (original_area - corroded_area) / original_area))
print("Ixx reduction = {0:.2f}%".format(
100 *(original_ixx - corroded_ixx) / original_ixx))
The following plot is generated by the above example:
.. figure:: ../images/offset_example.png
:align: center
:scale: 75 %
200UB25 with 1.5 mm corrosion.
The following is printed to the terminal:
.. code-block:: text
Area reduction = 41.97%
Ixx reduction = 39.20%
"""
# initialise perimeter points list
perimeter_points = []
# add perimeter points to the list
for facet_idx in geometry.perimeter:
# get the facet
facet = geometry.facets[facet_idx]
# get the first point on the facet
point = geometry.points[facet[0]]
# add the (x,y) tuple to the list
perimeter_points.append((point[0], point[1]))
# create LinearRing object
perimeter = LinearRing(perimeter_points)
# offset perimeter
new_perimeter = perimeter.parallel_offset(
distance=offset, side=side, resolution=0, join_style=2
)
(new_xcoords, new_ycoords) = new_perimeter.xy
# create deep copy of original geometry object
new_geometry = copy.deepcopy(geometry)
# replace offset points in new geometry
for (i, facet_idx) in enumerate(new_geometry.perimeter):
# get the facet
facet = new_geometry.facets[facet_idx]
# get the first point on the facet
point = new_geometry.points[facet[0]]
# replace the point location with the offset location
point[0] = new_xcoords[i]
point[1] = new_ycoords[i]
if plot_offset:
(fig, ax) = plt.subplots()
# plot new geometry
for (i, f) in enumerate(new_geometry.facets):
if i == 0:
ax.plot([new_geometry.points[f[0]][0], new_geometry.points[f[1]][0]],
[new_geometry.points[f[0]][1], new_geometry.points[f[1]][1]],
'ko-', markersize=2, label='Offset Geometry')
else:
ax.plot([new_geometry.points[f[0]][0], new_geometry.points[f[1]][0]],
[new_geometry.points[f[0]][1], new_geometry.points[f[1]][1]],
'ko-', markersize=2)
# plot the original perimeter
for (i, facet_idx) in enumerate(geometry.perimeter):
f = geometry.facets[facet_idx]
if i == 0:
ax.plot([geometry.points[f[0]][0], geometry.points[f[1]][0]],
[geometry.points[f[0]][1], geometry.points[f[1]][1]],
'r--', markersize=2, label='Original Perimeter')
else:
ax.plot([geometry.points[f[0]][0], geometry.points[f[1]][0]],
[geometry.points[f[0]][1], geometry.points[f[1]][1]],
'r--', markersize=2)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title('Offset Geometry')
ax.set_aspect('equal', anchor='C')
plt.tight_layout()
plt.show()
return new_geometry | e8b2851ea5ffd17faeb62bd3ca094e4cb6dd162a | 22,796 |
import numpy as np
import pandas as pd
def WalterComposition(F,P):
"""
Calculates the melt composition generated as a function of F and P, using the
parameterisation of Duncan et al. (2017).
Parameters
-----
F: float
Melt fraction
P: float
Pressure in GPa
Returns
-----
MeltComposition: series
Major elements in wt%
T: float
Temperatures associated with the melt in C
"""
if isinstance(F,float):
F = np.array([F])
P = np.array([P])
if isinstance(F,list):
F = np.array(F)
P = np.array(P)
comp = pd.DataFrame(np.array([P,F]).T,columns=['P','X'])
F = F*100
if F[F.argmin()] == 0:
F[F.argmin()] = F[F.argmin()+1]
comp['SiO2'] = ((-2.137e-5*P-9.83e-4)*F**2 + (5.975e-3*P+8.513e-2)*F
+(-4.236e-1*P+4.638e1))
comp['Al2O3'] = ((-1.908e-4*P-1.366e-4)*F**2+(4.589e-2*P-1.525e-1)*F
+(-2.685*P+2.087e1))
comp['FeO'] = ((2.365e-4*P-8.492e-4)*F**2+(-3.723e-2*P+1.1e-1)*F
+(1.366*P+5.488))
comp['MgO'] = ((-8.068e-5*P+1.747e-3)*F**2+(-1.268e-2*P+9.761e-2)*F
+(2.12*P+9.914))
comp['CaO'] = ((4.305e-5*P-4.513e-4)*F**2+(1.104e-3*P-4.948e-2)*F
+(-5.564e-1*P+1.294e1))
comp['TiO2'] = 12.370*F**-0.917
comp['Na2O'] = 5.339*F**-0.654
comp['K2O'] = 6.612*F**-0.923
limTiO2 = 12.370*3**-0.917
limNa2O = 5.339*3**-0.654
limK2O = 6.612*3**-0.923
comp.TiO2[comp.TiO2>limTiO2] = limTiO2
comp.Na2O[comp.Na2O>limNa2O] = limNa2O
comp.K2O[comp.K2O>limK2O] = limK2O
comp['Cr2O3'] = -7.86e-5*F**2 + 9.705e-3*F + 2.201e-1
comp['MnO'] = -6.757e-6*F**2 + -2.04e-5*F + 2.014e-1
T = ((8.597e-3*P-1.963e-2)*F**2 + (-1.374*P+7.383)*F + 9.369e1*P + 1.177e3)
return comp, T | 54b2b5d6e6f4500da1c17e54bcb9b8804a65e9b0 | 22,797 |
from pathlib import Path
def fetch_study_metadata(
data_dir: Path, version: int = 7, verbose: int = 1
) -> pd.DataFrame:
"""
Download if needed the `metadata.tsv.gz` file from Neurosynth and load
it into a pandas DataFrame.
The metadata table contains the metadata for each study. Each study (ID)
is stored on its own line. These IDs are in the same order as the id
column of the associated `coordinates.tsv.gz` file, but the rows will
differ because the coordinates file will contain multiple rows per
study. They are also in the same order as the rows in the
`features.npz` files for the same version.
The metadata will therefore have N rows, N being the number of studies
in the Neurosynth dataset. The columns (for version 7) are:
- id
- doi
- space
- title
- authors
- year
- journal
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
Returns
-------
pd.DataFrame
the study metadata dataframe
"""
metadata_filename = f"data-neurosynth_version-{version}_metadata.tsv.gz"
metadata_file = _fetch_files(
data_dir,
[
(
metadata_filename,
NS_DATA_URL + metadata_filename,
{},
),
],
verbose=verbose,
)[0]
metadata = pd.read_table(metadata_file)
return metadata | e04a0dd631f8b8a53708118134b8d5039e83bcdf | 22,798 |
from typing import Union
def proveFormula(formula: str) -> Union[int, str]:
"""
Implements proveFormula according to grader.py
>>> proveFormula('p')
1
>>> proveFormula('(NOT (NOT (NOT (NOT not)) )\t)')
1
>>> proveFormula('(NOT (NOT (NOT (NOT not)) )')
'E'
>>> proveFormula('(IF p p)')
'T'
>>> proveFormula('(AND p (NOT p))')
'U'
>>> proveFormula('(OR p (NOT q))')
3
"""
ast = parse(formula)
if ast is None:
return 'E'
result = determine_satisfiability(ast)
if result is True:
return 'T'
if result is False:
return 'U'
return result | 4c078bdfa586b9807b6265db43ee7187e6aef349 | 22,799 |
def sendSingleCommand(server, user, password, command):
"""Wrapper function to open a connection and execute a single command.
Args:
server (str): The IP address of the server to connect to.
user (str): The username to be used in the connection.
password (str): The password associated with the user.
command (str): The command to be executed.
Returns:
String: String containing the command output.
"""
# Open SSH connection
channel = openChannel(server, user, password)
output = ""
try:
output = sendCommand(channel, command)
finally:
# Close ssh connection
closeChannel(channel)
return output | 78a339b2bcb320ad81e79b8656867b894be22ecd | 22,801 |
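A hedged usage sketch; the host, credentials, and command below are hypothetical, and `openChannel`, `sendCommand`, and `closeChannel` are assumed to be the module helpers referenced above.
# Run one command over SSH and print whatever came back.
output = sendSingleCommand("192.0.2.10", "admin", "secret", "uname -a")
print(output)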
def test_piecewise_fermidirac(precision):
"""Creates a Chebyshev approximation of the Fermi-Dirac distribution within
the interval (-3, 3), and tests its accuracy for scalars, matrices, and
distributed matrices.
"""
mu = 0.0
beta = 10.0
def f(x):
return 1 / (np.exp(beta * (x - mu)) + 1)
is_vectorized = True
interval = (-3, 3)
n_cheb = 200
    # The former comes from the Chebyshev approximation error, the latter from numerical precision.
rtol = max(5e-6, 10 * testutils.eps(precision))
atol = max(5e-6, 10 * testutils.eps(precision))
test_samples = 1000
test_margin = 0
p_sz = 16
D = 128
dtype = np.float32
M = random_self_adjoint(D, dtype)
# Make sure the spectrum of M is within the interval.
interval_range = max(abs(i) for i in interval)
M = M / (jnp.linalg.norm(M) / interval_range)
v = np.random.randn(D, 1).astype(dtype)
chebyshev_test(
f,
interval,
M,
v,
n_cheb,
is_vectorized,
atol,
rtol,
test_samples,
test_margin,
p_sz,
precision=precision,
) | 587b7acfc5a114677f1bf5ab5a72a9f2019c6063 | 22,802 |
import cv2
def load_img(flist):
    """ Loads images into a list of arrays
    Args : list of image file paths
    Returns the list of image arrays and the last image (kept for size reference) """
    rgb_imgs = []
    for i in flist:
        rgb_imgs.append(cv2.imread(i, -1)) # flag <0 to return img as is
    print("\t> Batch import of N frames\t", len(rgb_imgs))
    size_var = cv2.imread(i) # last image; its .shape gives (height, width, channels)
return rgb_imgs, size_var | 74d3c312e936f434b3738eae79b8f499755cdd0a | 22,803 |
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
def makesimpleheader(headerin,naxis=2,radesys=None,equinox=None,pywcsdirect=False):
"""
Function to make a new 'simple header' from the WCS information in the input header.
Parameters
----------
headerin : astropy.io.fits.header
Header object
naxis : int
Specifies how many axes the final header should have. Default=2
radesys :str
RA/DEC system to use (valid SkyCoord frame system, e.g. 'icrs')
equinox : str
Equinox to use for the output header
pywcsdirect : bool
True to create the header directly with astropy.wcs.WCS
Returns
-------
astropy.io.fits.header
Output header
"""
if type(headerin)==str:
headerin=pyfits.getheader(headerin)
if pywcsdirect==True: wcstemp=pywcs.WCS(header=headerin)
else:
wcstemp=pywcs.WCS(naxis=naxis);
if naxis>2:
wcstemp.wcs.crpix=[float(headerin['CRPIX1']),float(headerin['CRPIX2']),float(headerin['CRPIX3'])]
wcstemp.wcs.crval=[float(headerin['CRVAL1']),float(headerin['CRVAL2']),float(headerin['CRVAL3'])]
wcstemp.wcs.ctype=[headerin['CTYPE1'],headerin['CTYPE2'],headerin['CTYPE3']]
try: wcstemp.wcs.cunit=[headerin['CUNIT1'],headerin['CUNIT2'],headerin['CUNIT3']]
except: pass
try: wcstemp.wcs.cdelt=list(getcdelts(headerin))+[headerin['CDELT3']];
except: raise(Exception('Invalid WCS CDELTS'))
else:
wcstemp.wcs.crpix=[float(headerin['CRPIX1']),float(headerin['CRPIX2'])]
wcstemp.wcs.crval=[float(headerin['CRVAL1']),float(headerin['CRVAL2'])]
wcstemp.wcs.ctype=[headerin['CTYPE1'],headerin['CTYPE2']]
try: wcstemp.wcs.cunit=[headerin['CUNIT1'],headerin['CUNIT2']]
except: pass
try: wcstemp.wcs.cdelt=list(getcdelts(headerin));
except: raise(Exception('Invalid WCS CDELTS'))
try: crota=getcdelts(headerin,getrot=True)[-1] #degrees, from N
except: raise(Exception('Invalid WCS params for CROTAx'))
#if crota!=0.: wcstemp.wcs.crota=[crota]*2 #Header will include PC_x cards if crot not 0
try: wcstemp.wcs.radesys=headerin['RADESYS']
except: pass
try: wcstemp.wcs.equinox=headerin['EQUINOX']
except: pass
if radesys is not None: wcstemp.wcs.radesys=radesys; #e.g. 'FK5', 'ICRS'. For manually forcing string, not true reprojection.
if equinox is not None: wcstemp.wcs.equinox=equinox; #e.g. 2000.0
simpleheader=wcstemp.to_header()
if pywcsdirect==False:
if crota!=0.: simpleheader['CROTA2']=crota #Alternative method to just use (deprecated) CROTA2 card
simpleheader['NAXIS']=naxis;
try: simpleheader['NAXIS1']=int(headerin['NAXIS1']); simpleheader['NAXIS2']=int(headerin['NAXIS2']);
except: pass
if naxis>2:
for card in ['NAXIS3','CRPIX3','CRVAL3','CDELT3','CTYPE3','CUNIT3', 'SPECSYS','ALTRVAL','ALTRPIX']:
try: simpleheader[card]=headerin[card]
except: pass
for card in ['CROTA','CROTA1','CROTA2','BSCALE','BZERO','ZSCALE','BMAJ','BMIN','BPA', 'JANSCALE','FLUXCONV',
'WAVELEN','FREQ', 'RESTFRQ', 'LATPOLE','LONPOLE']:
try: simpleheader[card]=float(headerin[card])
except: pass
for card in ['BUNIT','OBJECT','TELESCOP','ZUNITS','SPECSYS']:
try: simpleheader[card]=headerin[card]
except: pass
return simpleheader | ffdc8f755227451e3df2329e8ec804b7444a553d | 22,804 |
def _callcatch(ui, func):
"""like scmutil.callcatch but handles more high-level exceptions about
config parsing and commands. besides, use handlecommandexception to handle
uncaught exceptions.
"""
detailed_exit_code = -1
try:
return scmutil.callcatch(ui, func)
except error.AmbiguousCommand as inst:
detailed_exit_code = 10
ui.warn(
_(b"hg: command '%s' is ambiguous:\n %s\n")
% (inst.prefix, b" ".join(inst.matches))
)
except error.CommandError as inst:
detailed_exit_code = 10
if inst.command:
ui.pager(b'help')
msgbytes = pycompat.bytestr(inst.message)
ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes))
commands.help_(ui, inst.command, full=False, command=True)
else:
ui.warn(_(b"hg: %s\n") % inst.message)
ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
except error.UnknownCommand as inst:
detailed_exit_code = 10
nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command
try:
# check if the command is in a disabled extension
# (but don't check for extensions themselves)
formatted = help.formattedhelp(
ui, commands, inst.command, unknowncmd=True
)
ui.warn(nocmdmsg)
ui.write(formatted)
except (error.UnknownCommand, error.Abort):
suggested = False
if inst.all_commands:
sim = error.getsimilar(inst.all_commands, inst.command)
if sim:
ui.warn(nocmdmsg)
ui.warn(b"(%s)\n" % error.similarity_hint(sim))
suggested = True
if not suggested:
ui.warn(nocmdmsg)
ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
except IOError:
raise
except KeyboardInterrupt:
raise
except: # probably re-raises
if not handlecommandexception(ui):
raise
if ui.configbool(b'ui', b'detailed-exit-code'):
return detailed_exit_code
else:
return -1 | 495531b930187f1d3aff329453235f9683bc25bc | 22,805 |
def _determ_estim_update(new_bit, counts):
"""Beliefs only a sequence of all ones or zeros.
"""
new_counts = counts[:]
new_counts[new_bit] += 1
if new_counts[0] > 0 and new_counts[1] > 0:
return LOG_ZERO
log_p_new = _determ_log_p(new_counts)
log_p_old = _determ_log_p(counts)
return log_p_new - log_p_old | ea6f172161b215d5d474241da18fdd222692f245 | 22,806 |
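A small worked example of the update, assuming `counts` is a `[zeros, ones]` pair and `LOG_ZERO` / `_determ_log_p` are defined at module level as used above: a bit that matches the run so far yields a finite log-probability ratio, while a mixed sequence is assigned `LOG_ZERO`.
counts = [3, 0]                          # three zeros observed so far
print(_determ_estim_update(0, counts))   # finite: another zero stays consistent
print(_determ_estim_update(1, counts))   # LOG_ZERO: a one makes the sequence impossible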
def get_projects(config):
"""Find all XNAT projects and the list of scan sites uploaded to each one.
Args:
config (:obj:`datman.config.config`): The config for a study
Returns:
dict: A map of XNAT project names to the URL(s) of the server holding
that project.
"""
projects = {}
for site in config.get_sites():
xnat_project = config.get_key("XnatArchive", site=site)
projects.setdefault(xnat_project, set()).add(site)
return projects | 09824b67e73f8190d777ec782454940f27b70e33 | 22,807 |
import json
def load_jsonrpc_method(name):
"""Load a method based on the file naming conventions for the JSON-RPC.
"""
base_path = (repo_root() / "doc" / "schemas").resolve()
req_file = base_path / f"{name.lower()}.request.json"
resp_file = base_path / f"{name.lower()}.schema.json"
request = CompositeField.from_js(json.load(open(req_file)), path=name)
response = CompositeField.from_js(json.load(open(resp_file)), path=name)
# Normalize the method request and response typename so they no
# longer conflict.
request.typename += "Request"
response.typename += "Response"
return Method(
name=method_name_override.get(name, name),
request=request,
response=response,
) | 173fdaad563989042f6ff3c5622c4b56be1a5fa5 | 22,808 |
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
def client_decrypt_hello_reply(ciphertext, iv1, key1):
"""
Decrypt the server's reply using the IV and key we sent to it.
Returns iv2, key2, salt2 (8 bytes), and the original salt1.
The pair iv2/key2 are to be used in future communications.
Salt1 is returned to help confirm the integrity of the operation.
"""
iv1 = bytes(iv1)
key1 = bytes(key1)
# iv_ = ciphertext[0:AES_BLOCK_BYTES] # of no interest
cipher = Cipher(
algorithms.AES(key1),
modes.CBC(iv1),
backend=default_backend())
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
# unpadded = strip_pkcs7_padding(plaintext, AES_BLOCK_BYTES)
unpadder = padding.PKCS7(AES_BLOCK_BITS).unpadder()
unpadded = unpadder.update(plaintext) + unpadder.finalize()
iv2 = unpadded[:AES_BLOCK_BYTES]
key2 = unpadded[AES_BLOCK_BYTES: 3 * AES_BLOCK_BYTES]
salt2 = unpadded[3 * AES_BLOCK_BYTES: 3 * AES_BLOCK_BYTES + 8]
salt1 = unpadded[3 * AES_BLOCK_BYTES + 8: 3 * AES_BLOCK_BYTES + 16]
v_bytes = unpadded[3 * AES_BLOCK_BYTES + 16: 3 * AES_BLOCK_BYTES + 20]
version2 = v_bytes[0] |\
(v_bytes[1] << 8) |\
(v_bytes[2] << 16) |\
(v_bytes[3] << 24)
return iv2, key2, salt2, salt1, version2 | 70f3361acbeaa26376d4a54605a526ecac5ea61e | 22,810 |
import pandas
def load_labeled_data(filename):
""" Loads data from a csv, where the last column is the label of the data in that row
:param filename: name of the file to load
:return: data frames and labels in separate arrays
"""
dataframe = pandas.read_csv(filename, header=None)
dataset = dataframe.values
data = dataset[:, 0:-1].astype(float)
labels = dataset[:, -1]
return data, labels | 727691d376b744ccfdffbd62dd9f386e7bd7c4dd | 22,811 |
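A usage sketch with a hypothetical CSV whose last column holds the label and all preceding columns are numeric features.
# iris.csv rows look like: 5.1,3.5,1.4,0.2,setosa
data, labels = load_labeled_data("iris.csv")
print(data.shape, labels[:3])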
def _get_data_tuple(sptoks, asp_termIn, label):
"""
Method obtained from Trusca et al. (2020), no original docstring provided.
:param sptoks:
:param asp_termIn:
:param label:
:return:
"""
# Find the ids of aspect term.
aspect_is = []
asp_term = ' '.join(sp for sp in asp_termIn).lower()
for _i, group in enumerate(window(sptoks, len(asp_termIn))):
if asp_term == ' '.join([g.lower() for g in group]):
aspect_is = list(range(_i, _i + len(asp_termIn)))
break
elif asp_term in ' '.join([g.lower() for g in group]):
aspect_is = list(range(_i, _i + len(asp_termIn)))
break
pos_info = []
for _i, sptok in enumerate(sptoks):
pos_info.append(min([abs(_i - i) for i in aspect_is]))
lab = None
if label == 'negative':
lab = -1
elif label == 'neutral':
lab = 0
elif label == "positive":
lab = 1
else:
raise ValueError("Unknown label: %s" % lab)
return pos_info, lab | 2dae699ba4da27f6a36b7aac21cc8bc759a71d67 | 22,814 |
from pathlib import Path
def setup_environment(new_region: Path) -> bool:
"""Try to create new_region folder"""
if new_region.exists():
print(f"{new_region.resolve()} exists, this may cause problems")
proceed = input("Do you want to proceed regardless? [y/N] ")
sep()
return proceed.startswith("y")
new_region.mkdir()
print(f"Saving newly generated region files to {new_region.resolve()}")
return True | 175e21b10aca860d9886841be743d8f2a240dfc6 | 22,815 |
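A usage sketch; the region folder name is hypothetical.
if not setup_environment(Path("new_region")):
    raise SystemExit("Aborted: refusing to reuse an existing region folder")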
def headers():
"""Default headers for making requests."""
return {
'content-type': 'application/json',
'accept': 'application/json',
} | 53e42df6cae8ba9cbdc5f0e0a86a0154d3ba360e | 22,817 |
def merge_two_lists(l1: ListNode, l2: ListNode) -> ListNode:
"""Returns a single sorted, in-place merged linked list of two sorted input linked lists
The linked list is made by splicing together the nodes of l1 and l2
Args:
l1:
l2:
Examples:
>>> l1 = linked_list.convert_list_to_linked_list([1,2,4])
>>> l2 = linked_list.convert_list_to_linked_list([1,3,4])
>>> merge_two_lists(l1, l2).as_list()
[1, 1, 2, 3, 4, 4]
>>> l1 = linked_list.convert_list_to_linked_list([])
>>> l2 = linked_list.convert_list_to_linked_list([0])
>>> merge_two_lists(l1, l2).as_list()
[0]
>>> merge_two_lists(l2, l1).as_list()
[0]
>>> merge_two_lists(None, None)
"""
"""ALGORITHM"""
head_handle = curr = ListNode(None)
while l1 is not None and l2 is not None:
if l1.val <= l2.val:
curr.next, l1 = l1, l1.next
else:
curr.next, l2 = l2, l2.next
curr = curr.next
# Post-condition:
# if at least one list was not None, one list is now exhausted and `curr`
# is the last node of the now exhausted list; complete splice by assigning
# the head of the remaining non-exhausted list to `curr.next`
curr.next = l1 if l1 is not None else l2
return head_handle.next | 49033ef17e0940a201c70555cc0e49b8e745fb3b | 22,818 |
import csv
def map_SOPR_to_firm():
"""
Map SOPR identifiers to a lobbying CUID.
    Return a dictionary.
"""
firms = {}
with open(DATASET_PATH_TO['LOBBYING_FIRMS'], 'rb') as f:
reader = csv.reader(f, delimiter='%', quoting=csv.QUOTE_NONE)
for record in reader:
SOPR_reports = record[3].split(';')
CUID_firm = record[0]
for report_id in SOPR_reports:
firms[report_id] = CUID_firm
return firms | e0f00d7f720512eef3e32685bb8ba5ed4ed0203c | 22,819 |
from typing import Set
def specialbefores_given_external_square(
befores: Set[Before],
directly_playable_squares: Set[Square],
external_directly_playable_square: Square) -> Set[Specialbefore]:
"""
Args:
befores (Set[Before]): a set of Befores used to create Specialbefores.
directly_playable_squares (Set[Square]): a set of directly playable squares, possibly including square.
external_directly_playable_square (Square): a square to be used as the external directly playable
square of each Specialbefore.
Returns:
specialbefores (Set[Specialbefore]): a set of Specialbefores. Each Specialbefore uses square as its external
directly playable square.
"""
specialbefores = set()
for before in befores:
directly_playable_squares_in_before_group = internal_directly_playable_squares(
before, directly_playable_squares)
for internal_directly_playable_square in directly_playable_squares_in_before_group:
if can_be_used_with_before(external_directly_playable_square, before):
specialbefores.add(Specialbefore(
before=before,
internal_directly_playable_square=internal_directly_playable_square,
external_directly_playable_square=external_directly_playable_square,
))
return specialbefores | bb0405cc783ee94130893d0dca0f0b06e43d71c5 | 22,820 |
import pytest
def check_if_all_tests_pass(option='-x'):
"""Runs all of the tests and only returns True if all tests pass.
The -x option is the default, and -x will tell pytest to exit on the first encountered failure.
The -s option prints out stdout from the tests (normally hidden.)"""
options = [option]
arguments = options
exitcode = pytest.main(arguments)
all_passed = exitcode == 0
if not all_passed:
input()
return all_passed | 81e41cb985bcf346d9351d327d0ca0941ed7320e | 22,822 |
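A usage sketch: run the suite with the default fail-fast option and branch on the result.
if check_if_all_tests_pass("-x"):
    print("All tests passed")
else:
    print("At least one test failed")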
import http
def init(api, _cors, impl):
"""Configures REST handlers for allocation resource."""
namespace = webutils.namespace(
api, __name__, 'Local nodeinfo redirect API.'
)
@namespace.route('/<hostname>/<path:path>')
class _NodeRedirect(restplus.Resource):
"""Redirects to local nodeinfo endpoint."""
def get(self, hostname, path):
"""Returns list of local instances."""
hostport = impl.get(hostname)
if not hostport:
return 'Host not found.', http.client.NOT_FOUND
url = utils.encode_uri_parts(path)
return flask.redirect('http://%s/%s' % (hostport, url),
code=http.client.FOUND) | 77430c891ceac87bec3d8b1cfa46557fbc1fd9f5 | 22,823 |
def split_matrix_2(input1):
"""
Split matrix.
Args:
        input1: tvm.Tensor of type float32.
Returns:
akg.tvm.Tensor of type float32 with 3d shape.
"""
dim = input1.shape[0]
split_num = dim // split_dim
result_3 = allocate((split_num, split_dim, split_dim), input1.dtype, 'local')
for i in range(split_num):
for j in range(split_dim):
for k in range(split_dim):
result_3[i,j,k] = input1[i * split_dim + j, i * split_dim + k]
return result_3 | 8ee5b4069c28166ef6181cb8c6ef1e21232239a4 | 22,824 |
import glob
def load_all(path, jobmanager=None):
"""Load all jobs from *path*.
This function works as a multiple execution of |load_job|. It searches for ``.dill`` files inside the directory given by *path*, yet not directly in it, but one level deeper. In other words, all files matching ``path/*/*.dill`` are used. That way a path to the main working folder of previously run script can be used to import all jobs run by that script.
The purpose of this function is to provide quick and easy way of restarting a script that previously failed. Loading all successful jobs from the previous run prevents double work and allows the script to proceed directly to the place where it failed.
Jobs are loaded using default job manager stored in ``config.jm``. If you wish to use a different one you can pass it as *jobmanager* argument of this function.
Returned value is a dictionary containing all loaded jobs as values and absolute paths to ``.dill`` files as keys.
"""
jm = jobmanager or config.jm
loaded_jobs = {}
for f in glob.glob(opj(path, '*', '*.dill')):
loaded_jobs[f] = jm.load_job(f)
return loaded_jobs | f76e2baeaa0b35283eed4748d68403827bdaff97 | 22,825 |
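A usage sketch pointing at a hypothetical previous working folder; jobs are restored with the default job manager from `config.jm`.
jobs = load_all("plams_workdir.002")
print(f"restored {len(jobs)} jobs from the previous run")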
import numpy
def relative_error(estimate, exact):
"""
Compute the relative error of an estimate, in percent.
"""
tol = 1e-15
if numpy.abs(exact) < tol:
if numpy.abs(estimate - exact) < tol:
relative_error = 0.0
else:
relative_error = numpy.inf
else:
relative_error = numpy.abs((estimate - exact) / exact) * 100.0
return relative_error | 4170fd4a7c448eb312ea9f42d436d12acd828695 | 22,826 |
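A quick worked example: an estimate of 9.8 against an exact value of 10.0 is off by roughly 2 percent, and two values below the tolerance give a zero error.
print(relative_error(9.8, 10.0))   # ~2.0 (percent)
print(relative_error(0.0, 0.0))    # 0.0, both magnitudes under the 1e-15 tolerance
print(relative_error(1.0, 0.0))    # inf, nonzero estimate of an exactly-zero value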
def list_users(cursor):
"""
Returns the current roles
"""
cursor.execute(
"""
SELECT
r.rolname AS name,
r.rolcanlogin AS login,
ARRAY(
SELECT b.rolname
FROM pg_catalog.pg_auth_members m
JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
WHERE m.member = r.oid
) AS memberof
FROM pg_catalog.pg_roles r
"""
)
return map(User.create, cursor.fetchall()) | b51efbae5da08089987e3bc2753e1da3c13ee365 | 22,828 |
def get_job_exe_output_vol_name(job_exe):
"""Returns the container output volume name for the given job execution
:param job_exe: The job execution model (must not be queued) with related job and job_type fields
:type job_exe: :class:`job.models.JobExecution`
:returns: The container output volume name
:rtype: string
:raises Exception: If the job execution is still queued
"""
return '%s_output_data' % job_exe.get_cluster_id() | c625b596f9ee819eb0c7afc9aed1328ecef0e206 | 22,830 |
def bytes2human(n, format='%(value).1f %(symbol)s', symbols='customary'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> bytes2human(0)
'0.0 B'
>>> bytes2human(0.9)
'0.0 B'
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1.9)
'1.0 B'
>>> bytes2human(1024)
'1.0 K'
>>> bytes2human(1048576)
'1.0 M'
>>> bytes2human(1099511627776127398123789121)
'909.5 Y'
>>> bytes2human(9856, symbols="customary")
'9.6 K'
>>> bytes2human(9856, symbols="customary_ext")
'9.6 kilo'
>>> bytes2human(9856, symbols="iec")
'9.6 Ki'
>>> bytes2human(9856, symbols="iec_ext")
'9.6 kibi'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = SYMBOLS[symbols]
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n) | 21d3f52fe60a25a860c8350c20e0b43209802751 | 22,831 |
def check_if_free(driver, available, movie_hulu_url):
"""
Check if "Watch Movie" button is there
if not, it's likely available in a special package (Starz etc) or availabe for Rent on Hulu.
"""
is_free = False
if available:
driver.get(movie_hulu_url)
sleep(3)
watch_movie_button = driver.find_elements_by_class_name("WatchAction")
for e in watch_movie_button:
#print(e.text)
#print(e.get_attribute('href'))
if e.text == "WATCH MOVIE":
is_free = True
return is_free | e27d62538e5bf9c416bcaedb4b7c5c4706493ba0 | 22,832 |
def scatter_add(data, indices, updates, axis=0):
"""Update data by adding values in updates at positions defined by indices
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to be added.
axis : int
The axis to scatter on
Returns
-------
ret : relay.Expr
The computed result.
"""
if axis < 0:
axis += len(data.shape)
assert axis >= 0
assert axis < len(data.shape)
rank = len(data.shape)
assert 1 <= rank <= 4, "scatter_add only supports 1-4 dimensions"
ir_funcs = {
1: gen_scatter_add_1d_atomic,
2: gen_ir_2d,
3: gen_ir_3d,
4: gen_ir_4d,
}
def update_func(dst_ptr, dst_index, update):
dst_ptr[dst_index] += update
out_shape = data.shape
out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
out = te.extern(
[out_shape],
[data, indices, updates],
lambda ins, outs: ir_funcs[rank](ins[0], ins[1], ins[2], axis, outs[0], update_func),
dtype=data.dtype,
out_buffers=[out_buf],
name="scatter_add_gpu",
tag="scatter_add_gpu",
)
return out | 641d96562700553a2ed4a5c4df323d468bba1bd8 | 22,833 |
def generate_test_linked_list(size=5, singly=False):
"""
Generate node list for test case
:param size: size of linked list
:type size: int
:param singly: whether or not this linked list is singly
:type singly: bool
:return: value list and generated linked list
"""
assert size >= 1
val_list = [i for i in range(size)]
node_list = LinkedList(singly=singly)
node_list.append_val_list(val_list)
return val_list, node_list | 5d6d5fc3c6027cc18fd6da24a7cefc506e64eb2a | 22,834 |
def _bytes_to_long(bytestring, byteorder):
"""Convert a bytestring to a long
For use in python version prior to 3.2
"""
result = []
if byteorder == 'little':
result = (v << i * 8 for (i, v) in enumerate(bytestring))
else:
result = (v << i * 8 for (i, v) in enumerate(reversed(bytestring)))
return sum(result) | fcaa038b21aef2822ad7a513c28a7a2ed3c08cbc | 22,835 |
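A worked example: on Python 3 (where iterating bytes yields integers) the big-endian bytes 0x01 0x00 decode to 256, matching `int.from_bytes`.
assert _bytes_to_long(b"\x01\x00", "big") == 256
assert _bytes_to_long(b"\x01\x00", "little") == 1
assert _bytes_to_long(b"\x01\x00", "big") == int.from_bytes(b"\x01\x00", "big")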
import contextlib
import ast
def SoS_exec(script: str, _dict: dict = None, return_result: bool = True) -> None:
"""Execute a statement."""
if _dict is None:
_dict = env.sos_dict.dict()
if not return_result:
if env.verbosity == 0:
with contextlib.redirect_stdout(None):
exec(
compile(script, filename=stmtHash.hash(script), mode="exec"), _dict
)
else:
exec(compile(script, filename=stmtHash.hash(script), mode="exec"), _dict)
return None
try:
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return
if isinstance(stmts[-1], ast.Expr):
# the last one is an expression and we will try to return the results
# so we first execute the previous statements
if len(stmts) > 1:
if env.verbosity == 0:
with contextlib.redirect_stdout(None):
exec(
compile(
ast.Module(body=stmts[:-1], type_ignores=[]),
filename=stmtHash.hash(script),
mode="exec",
),
_dict,
)
else:
exec(
compile(
ast.Module(body=stmts[:-1], type_ignores=[]),
filename=stmtHash.hash(script),
mode="exec",
),
_dict,
)
# then we eval the last one
if env.verbosity == 0:
with contextlib.redirect_stdout(None):
res = eval(
compile(
ast.Expression(body=stmts[-1].value),
filename=stmtHash.hash(script),
mode="eval",
),
_dict,
)
else:
res = eval(
compile(
ast.Expression(body=stmts[-1].value),
filename=stmtHash.hash(script),
mode="eval",
),
_dict,
)
else:
# otherwise we just execute the entire code
if env.verbosity == 0:
with contextlib.redirect_stdout(None):
exec(
compile(script, filename=stmtHash.hash(script), mode="exec"),
_dict,
)
else:
exec(
compile(script, filename=stmtHash.hash(script), mode="exec"), _dict
)
res = None
except SyntaxError as e:
raise SyntaxError(f"Invalid code {script}: {e}")
# if check_readonly:
# env.sos_dict.check_readonly_vars()
return res | c524aa064dfc396d421cdef9962d81ca79c7010b | 22,837 |
def aten_dim(mapper, graph, node):
""" 构造获取维度的PaddleLayer。
TorchScript示例:
%106 : int = aten::dim(%101)
参数含义:
%106 (int): 输出,Tensor的维度。
%101 (Tensor): 输入的Tensor。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Get the list of outputs of the current node
current_outputs = [output_name]
    # Process input 0, i.e. %input.8
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["input"] = inputs_name[0]
    # Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.shape", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
graph.add_layer(
"prim.len", inputs={"input": output_name}, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs | 8037cc1943577aed2737aceee47b97b59c6a9244 | 22,838 |
def plot_LA(mobile, ref, GDT_TS, GDT_HA, GDT_ndx,
sel1="protein and name CA", sel2="protein and name CA",
cmap="GDT_HA", **kwargs):
"""
Create LocalAccuracy Plot (heatmap) with
- xdata = residue ID
- ydata = frame number
- color = color-coded pair distance
.. Note:: do not pass too many data points otherwise the plot will get squeezed
Args:
mobile (universe, atomgrp): mobile structure with trajectory
ref (universe, atomgrp): reference structure
GDT_TS (array): array with GDT_TS scores.
GDT_HA (array): array with GDT_HA scores.
GTD_ndx (array): array with corresponding index values (representative for frame numbers).
sel1 (str): selection string of mobile structure (calculation of pair distances)
sel2 (str): selection string of reference structure (calculation of pair distances)
cmap (str):
| "GDT_TS" or "TS": color map with new colors at values (0, 1, 2, 4, 8)
and vmin, vmax = (0, 10).
| "GDT_HA" or "HA": color map with new colors at values (0, .5, 1, 2, 4)
and vmin, vmax = (0, 5).
| "nucleic" or "RNA" or "DNA": color map with new colors at values (0, .5, 1, 2, 4)
and vmin, vmax = (0, 20).
| other cmap names: see help(plt.colormaps) or alternatively
https://matplotlib.org/examples/color/colormaps_reference.html
Keyword Args:
prec (None, int):
| rounding precision of scores
| None: rounding off
| int: rounding on to <int> decimals
ndx_offset (int):
| offset/shift of GDT_ndx to match real "mobile" frames. Defaults to 0.
| Look up "start" parameter during execution of gdt.GDT()
rank_num (int): plot only <rank_num> best ranked frames. Defaults to 30.
show_cbar (bool): show/hide colorbar. Defaults to True.
show_frames (bool): show/hide frame numbers. Defaults to False.
show_scores (bool): show/hide GDT_TS and GDT_HA scores. Defaults to True.
save_as (None, str): save name or realpath to save file. Defaults to None.
cbar_ticks (None, list): color bar tick positions. Defaults to None.
cbar_label/label (str)
cbar_fontweight/fontweight (str): "normal", "bold"
cbar_location/location (str): "right", "bottom", "left", "top"
cbar_orientation/orientation (str): "horizontal", "vertical"
cbar_min/vmin (None, int): min value of colorbar and heatmap. Gets
overwritten by cmaps such as "GDT_TS", "GDT_HA", "RNA" etc.
cbar_max/vmax (None, int): max value of colorbar and heatmap. Gets
overwritten by cmaps such as "GDT_TS", "GDT_HA", "RNA" etc.
text_pos_Frame (list): [x0, y0] position of the "Frame" text box (label)
text_pos_TS (list): [x0, y0] position of the "TS" text box (label)
text_pos_HA (list): [x0, y0] position of the "HA" text box (label)
font_scale (float)
.. Hint:: Args and Keyword of misc.figure() are also valid.
Returns:
fig (class)
matplotlib.figure.Figure
ax (class, list)
ax or list of axes ~ matplotlib.axes._subplots.Axes
LA_data (tuple)
| LA_data[0]: PairDistances (list)
| LA_data[1]: Frames (list)
Example:
| # obtain data
| >> GDT = gdt.GDT(mobile, ref, sss=[None,None,None])
| >> GDT_percent, GDT_resids, GDT_cutoff, RMSD, FRAME = GDT
|
| # rank data
| >> SCORES = gdt.GDT_rank_scores(GDT_percent, ranking_order="GDT_HA")
| >> GDT_TS_ranked, GDT_HA_ranked, GDT_ndx_ranked = SCORES
|
| # edit text box positions of labels "Frame", "TS", "HA"
        | >> text_pos_kws = {"text_pos_Frame": [-8.8, -0.3],
| "text_pos_TS": [-4.2, -0.3],
| "text_pos_HA": [-1.9, -0.3]}
|
| # plot
| >> gdt.plot_LA(mobile, ref, SCORES[0], SCORES[1], SCORES[2], **text_pos_kws)
"""
# init CONFIG object with default parameter and overwrite them if kwargs contain the same keywords.
default = {"figsize": (7.5, 6),
"font_scale": 1.2,
"ndx_offset": 0,
"rank_num": 30,
"show_cbar": True,
"show_frames": False,
"show_scores": True,
"save_as": None,
"prec": 2,
"cmap": cmap,
"cbar_ticks": None,
"cbar_label": r"mobile-reference CA-CA distances ($\AA$)",
"cbar_fontweight": "bold",
"cbar_location": 'right',
"cbar_orientation": 'vertical',
"cbar_min": None,
"cbar_max": None,
"vmin": None,
"vmax": None,
"text_pos_Frame": [-8.8, -0.3],
"text_pos_TS": [-3.8, -0.3],
"text_pos_HA": [-1.7, -0.3]}
cfg = _misc.CONFIG(default, **kwargs)
cfg.update_by_alias(alias="label", key="cbar_label", **kwargs)
cfg.update_by_alias(alias="fontweight", key="cbar_fontweight", **kwargs)
cfg.update_by_alias(alias="location", key="cbar_location", **kwargs)
cfg.update_by_alias(alias="orientation", key="cbar_orientation", **kwargs)
cfg.update_by_alias(alias="vmin", key="cbar_min", **kwargs)
cfg.update_by_alias(alias="vmax", key="cbar_max", **kwargs)
############################################################################
### load data
PAIR_DISTANCES = []
FRAMES = [i+cfg.ndx_offset for i in GDT_ndx[:cfg.rank_num]]
for ts in mobile.trajectory[FRAMES]:
PD, *_ = get_Pair_Distances(mobile, ref, sel1=sel1, sel2=sel2)
PAIR_DISTANCES.append(PD)
if cfg.prec != None and cfg.prec != -1:
GDT_TS = np.around(GDT_TS[: cfg.rank_num], cfg.prec)
GDT_HA = np.around(GDT_HA[: cfg.rank_num], cfg.prec)
xticks = mobile.select_atoms(sel1).residues.resids
xticks = [x if x % 5 == 0 else "." for x in xticks]
xticklabels = xticks
if cfg.show_frames and cfg.show_scores:
yticks = [f"{FRAMES[i]:>9}{GDT_TS[i]:>10.2f}{GDT_HA[i]:>8.2f} " if GDT_TS[i] != 100 else
f"{FRAMES[i]:>9}{GDT_TS[i]:>9.2f}{GDT_HA[i]:>8.2f} " for i in range(len(FRAMES))]
elif cfg.show_frames:
yticks = FRAMES
elif cfg.show_scores:
yticks = [f"{GDT_TS[i]:>10.2f}{GDT_HA[i]:>8.2f} " if GDT_TS[i] != 100 else
f"{GDT_TS[i]:>9.2f}{GDT_HA[i]:>8.2f} " for i in range(len(FRAMES))]
yticklabels = yticks
############################################################################
### heatmap/cbar settings
cmap_GDT = ["lightblue", "lightgreen", "yellow", "yellow", "orange", "orange",
"orange", "orange", "red", "red"]
cmap_RNA = ["lightblue", "lightblue", "lightgreen", "lightgreen",
"yellow", "yellow", "orange", "orange", "red", "red"]
# apply color bar limits if passed (vmin and vmax have higher priority than cbar_min and cbar_max)
if cfg.cbar_min is not None:
cfg.vmin = cfg.cbar_min
if cfg.cbar_max is not None:
cfg.vmax = cfg.cbar_max
# if no limits passed: apply pre-defined limits
if cfg.cmap in ["GDT_HA", "HA"]:
if cfg.vmin is None:
cfg.vmin = 0.0
if cfg.vmax is None:
cfg.vmax = 5.0
elif cfg.cmap in ["GDT_TS", "TS"]:
if cfg.vmin is None:
cfg.vmin = 0.0
if cfg.vmax is None:
cfg.vmax = 10.0
elif cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
if cfg.vmin is None:
cfg.vmin = 0.0
if cfg.vmax is None:
cfg.vmax = 14.0
############################################################################
### plot
fig, ax = _misc.figure(**cfg)
if cfg.show_cbar:
cbar_ax = _misc.add_cbar_ax(ax, location=cfg.cbar_location,
orientation=cfg.cbar_orientation)
cbar_kws = {'orientation': cfg.cbar_orientation}
else:
cbar_ax = None
cbar_kws = dict()
if cfg.cmap in ["GDT_TS", "TS", "GDT_HA", "HA"]:
hm = sns.heatmap(PAIR_DISTANCES, cmap=cmap_GDT, vmin=cfg.vmin, vmax=cfg.vmax,
xticklabels=xticklabels, yticklabels=yticklabels,
square=False, annot=False, linewidths=1.0,
ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)
elif cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
hm = sns.heatmap(PAIR_DISTANCES, cmap=cmap_RNA, vmin=cfg.vmin, vmax=cfg.vmax,
xticklabels=xticklabels, yticklabels=yticklabels,
square=False, annot=False, linewidths=1.0,
ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)
else:
hm = sns.heatmap(PAIR_DISTANCES, cmap=cfg.cmap, vmin=cfg.vmin, vmax=cfg.vmax,
xticklabels=xticklabels, yticklabels=yticklabels,
square=False, annot=False, linewidths=1.0,
ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)
if cfg.show_cbar:
cbar = hm.collections[0].colorbar
cbar.set_label(label=cfg.cbar_label, fontweight=cfg.cbar_fontweight)
_misc.cbar_set_ticks_position(cbar, cfg.cbar_location)
if cfg.cbar_ticks is None and cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
cbar.set_ticks(np.arange(0, 22, 2))
if cfg.cbar_ticks is not None:
cbar.set_ticks(cfg.cbar_ticks)
ax.tick_params(left=False, bottom=False) # hide ticks of heatmap
plt.title("Local Accuracy", fontweight='bold')
plt.xlabel("Residue ID", fontweight='bold')
# table labels
if cfg.show_frames:
ax.text(cfg.text_pos_Frame[0], cfg.text_pos_Frame[1], 'Frame', fontweight='bold')
if cfg.show_scores:
ax.text(cfg.text_pos_TS[0], cfg.text_pos_TS[1], 'TS', fontweight='bold')
ax.text(cfg.text_pos_HA[0], cfg.text_pos_HA[1], 'HA', fontweight='bold')
plt.tight_layout()
plt.tight_layout()
if cfg.save_as != None:
_misc.savefig(cfg.save_as)
if len(FRAMES) > 50:
print("Displaying data for more than 50 frames...")
print("Consider reducing the input data (e.g. rank scores and use top 40 frames).")
LA_data = (PAIR_DISTANCES, FRAMES)
return(fig, ax, LA_data) | 8103dbeb9801b08125ebb5e26cb5f76c948262ec | 22,840 |
def simulateGVecs(pd, detector_params, grain_params,
ome_range=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
eta_range=[(-np.pi, np.pi), ],
panel_dims=[(-204.8, -204.8), (204.8, 204.8)],
pixel_pitch=(0.2, 0.2),
distortion=None):
"""
returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps
panel_dims are [(xmin, ymin), (xmax, ymax)] in mm
pixel_pitch is [row_size, column_size] in mm
    simulate the monochromatic scattering for a specified
- space group
- wavelength
- orientation
- strain
- position
- detector parameters
- oscillation axis tilt (chi)
subject to
- omega (oscillation) ranges (list of (min, max) tuples)
- eta (azimuth) ranges
pd................a hexrd.crystallography.PlaneData instance
detector_params...a (10,) ndarray containing the tilt angles (3),
translation (3), chi (1), and sample frame translation
(3) parameters
grain_params......a (12,) ndarray containing the exponential map (3),
                      translation (3), and inverse stretch tensor components
in Mandel-Voigt notation (6).
* currently only one panel is supported, but this will likely change soon
"""
bMat = pd.latVecOps['B']
wlen = pd.wavelength
full_hkls = _fetch_hkls_from_planedata(pd)
# extract variables for convenience
rMat_d = xfcapi.makeDetectorRotMat(detector_params[:3])
tVec_d = np.ascontiguousarray(detector_params[3:6])
chi = detector_params[6]
tVec_s = np.ascontiguousarray(detector_params[7:10])
rMat_c = xfcapi.makeRotMatOfExpMap(grain_params[:3])
tVec_c = np.ascontiguousarray(grain_params[3:6])
vInv_s = np.ascontiguousarray(grain_params[6:12])
# first find valid G-vectors
angList = np.vstack(
xfcapi.oscillAnglesOfHKLs(
full_hkls[:, 1:], chi, rMat_c, bMat, wlen, vInv=vInv_s
)
)
allAngs, allHKLs = _filter_hkls_eta_ome(
full_hkls, angList, eta_range, ome_range
)
if len(allAngs) == 0:
valid_ids = []
valid_hkl = []
valid_ang = []
valid_xy = []
ang_ps = []
else:
# ??? preallocate for speed?
det_xy, rMat_s, on_plane = _project_on_detector_plane(
allAngs,
rMat_d, rMat_c, chi,
tVec_d, tVec_c, tVec_s,
distortion
)
#
on_panel_x = np.logical_and(
det_xy[:, 0] >= panel_dims[0][0],
det_xy[:, 0] <= panel_dims[1][0]
)
on_panel_y = np.logical_and(
det_xy[:, 1] >= panel_dims[0][1],
det_xy[:, 1] <= panel_dims[1][1]
)
on_panel = np.logical_and(on_panel_x, on_panel_y)
#
op_idx = np.where(on_panel)[0]
#
valid_ang = allAngs[op_idx, :]
valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period)
valid_ids = allHKLs[op_idx, 0]
valid_hkl = allHKLs[op_idx, 1:]
valid_xy = det_xy[op_idx, :]
ang_ps = angularPixelSize(valid_xy, pixel_pitch,
rMat_d, rMat_s,
tVec_d, tVec_s, tVec_c,
distortion=distortion)
return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps | bdff9dc1b7fd15d7b3b1cf45a4364dc495790293 | 22,842 |
import logging
def get_logger(name: str):
"""Get logger call.
Args:
name (str): Module name
Returns:
Logger: Return Logger object
"""
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(get_file_handler())
logger.addHandler(get_stream_handler())
return logger | 6d661896d38e2227b6825e649bdbd719dd64670a | 22,843 |
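A usage sketch, assuming `get_file_handler` and `get_stream_handler` are defined elsewhere in the module as referenced above.
logger = get_logger(__name__)
logger.info("pipeline started")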
def create_label_colormap(dataset=_PASCAL):
"""Creates a label colormap for the specified dataset.
Args:
dataset: The colormap used in the dataset.
Returns:
A numpy array of the dataset colormap.
Raises:
ValueError: If the dataset is not supported.
"""
if dataset == _PASCAL:
return create_pascal_label_colormap()
elif dataset == _CITYSCAPES:
return create_cityscapes_label_colormap()
elif dataset == _ADE:
return create_ade_label_colormap()
else:
raise ValueError('Unsupported dataset.') | 683d52f3f2c476b0e39e41ae7f2a0f897fee60d3 | 22,844 |
def SDM_lune(params, dvals, title=None, label_prefix='ham='):
"""Exact calculation for SDM circle intersection. For some reason mine is a slight upper bound on the results found in the book. Uses a proof from Appendix B of the SDM book (Kanerva, 1988). Difference is neglible when norm=True."""
res = expected_intersection_lune(params.n, dvals, params.hamm_radius, params.r )
if params.plot_lines:
plot_line(dvals, res, label_prefix, params.hamm_radius, params.norm)
if params.fit_beta_and_plot_attention:
fit_beta_res, beta = fit_beta_regression(params.n, dvals, res)
plot_line(dvals, fit_beta_res, 'fit_beta | '+label_prefix, params.hamm_radius, params.norm)
if title: # else can call "label plot separately"
label_plot(title, params.norm)
return res | e1465e002632ceb1431fa1a668abfdaa7deb307b | 22,845 |
def font_match(obj):
"""
    Matches the given input against the available
font type matchers.
Args:
obj: path to file, bytes or bytearray.
Returns:
Type instance if matches. Otherwise None.
Raises:
TypeError: if obj is not a supported type.
"""
return match(obj, font_matchers) | 8cf99e626578d278b3ce9e598233cb6dfa407820 | 22,846 |
import six
def time_monotonically_increases(func_or_granularity):
"""
Decorate a unittest method with this function to cause the value
of :func:`time.time` and :func:`time.gmtime` to monotonically
increase by one each time it is called. This ensures things like
last modified dates always increase.
We make three guarantees about the value of :func:`time.time`
returned while the decorated function is running:
1. It is always *at least* the value of the *real*
:func:`time.time`;
2. Each call returns a value greater than the previous call;
3. Those two constraints hold across different invocations of
functions decorated. This decorator can be applied to a
method in a test case::
        class TestThing(unittest.TestCase):
@time_monotonically_increases
def test_method(self):
t = time.time()
...
It can also be applied to a bare function taking any number of
arguments::
@time_monotonically_increases
def utility_function(a, b, c=1):
t = time.time()
...
By default, the time will be incremented in 1.0 second intervals.
You can specify a particular granularity as an argument; this is
useful to keep from running too far ahead of the real clock::
@time_monotonically_increases(0.1)
def smaller_increment():
t1 = time.time()
t2 = time.time()
            assert t2 == t1 + 0.1
"""
if isinstance(func_or_granularity, (six.integer_types, float)):
# We're being used as a factory.
wrapper_factory = _TimeWrapper(func_or_granularity)
return wrapper_factory
# We're being used bare
wrapper_factory = _TimeWrapper()
return wrapper_factory(func_or_granularity) | f3f76502cf0cd5f402cb4f585d6cd06db8eb5851 | 22,848 |
import numpy as np
from scipy.spatial.transform import Rotation as R
def rotate_coordinates(coords: np.ndarray, axis_coords: np.ndarray) -> np.ndarray:
"""
Given a set of coordinates, `coords`, and the eigenvectors of the principal
moments of inertia tensor, use the scipy `Rotation` class to rotate the
coordinates into the principal axis frame.
Parameters
----------
coords : np.ndarray
NumPy 1D array containing xyz coordinates
axis_coords : np.ndarray
NumPy 2D array (shape 3x3) containing the principal axis
vectors
Returns
-------
np.ndarray
NumPy 1D array containing the rotated coordinates.
"""
# Create a Rotation object from the eigenvectors
r_mat = R.from_matrix(axis_coords)
# transform the coordinates into the principal axis
return r_mat.apply(coords) | 686657f464fdf846fa394128ad1f8be6d00adf06 | 22,849 |
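A small sanity check, with numpy imported as above: using the identity matrix as the principal-axis frame leaves the coordinates unchanged (up to floating point).
coords = np.array([1.0, 2.0, 3.0])
assert np.allclose(rotate_coordinates(coords, np.eye(3)), coords)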
from typing import Type
from typing import Any
def get_maggy_ddp_wrapper(module: Type[TorchModule]):
"""Factory function for MaggyDDPModuleWrapper.
:param module: PyTorch module passed by the user.
"""
class MaggyDDPModuleWrapper(TorchDistributedDataParallel):
"""Wrapper around PyTorch's DDP Module.
The wrapper replaces the user's module. Since the module's signature needs to be preserved,
we cannot add the module as an additional parameter during initialization. Instead, it is
configured by its factory function.
"""
__module = module # Avoid overwriting torch module
def __init__(self, *args: Any, **kwargs: Any):
"""Initializes the previously set module, moves it to the GPU and initializes a DDP
module with it.
:param args: Arguments passed by the user for module initialization.
:param kwargs: Keyword arguments passed by the user for module initialization.
"""
# Avoid self because bound method adds to args which makes the function call fail
model = MaggyDDPModuleWrapper.__module(*args, **kwargs).cuda()
super().__init__(model)
return MaggyDDPModuleWrapper | 53f7e5096c41221072d7584470dd8a1bcf32a04f | 22,850 |
def random_jitter(cv_img, saturation_range, brightness_range, contrast_range):
"""
    Adjust image brightness, saturation, and contrast: a random adjustment ratio is drawn
    within each range, and the three effects are applied in a random order.
    Args:
        cv_img(numpy.ndarray): input image
        saturation_range(float): saturation adjustment range, 0-1
        brightness_range(float): brightness adjustment range, 0-1
        contrast_range(float): contrast adjustment range, 0-1
    Returns:
        the image after brightness, saturation, and contrast adjustment
"""
saturation_ratio = np.random.uniform(-saturation_range, saturation_range)
brightness_ratio = np.random.uniform(-brightness_range, brightness_range)
contrast_ratio = np.random.uniform(-contrast_range, contrast_range)
    order = [0, 1, 2]  # indices for saturation, brightness, contrast
np.random.shuffle(order)
for i in range(3):
if order[i] == 0:
cv_img = saturation_jitter(cv_img, saturation_ratio)
if order[i] == 1:
cv_img = brightness_jitter(cv_img, brightness_ratio)
if order[i] == 2:
cv_img = contrast_jitter(cv_img, contrast_ratio)
return cv_img | f7ff6d2e0bbe1656abe5ad1dca404e1903417166 | 22,851 |
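A usage sketch with a synthetic OpenCV-style image, assuming numpy is imported as `np` and the `saturation_jitter`, `brightness_jitter`, and `contrast_jitter` helpers called above are defined in the same module; each property gets a random ratio drawn from ±0.3.
img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
jittered = random_jitter(img, saturation_range=0.3, brightness_range=0.3, contrast_range=0.3)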