the-stack_106_30199 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class that wraps around multiple different compression operators.
This allows for easier testing of different operators. Rather than importing
each operator separately, this class can be used and different
compression_option values can be passed in to specify the operator type.
compression_option:
1 - LowRankDecompMatrixCompressor
2 - SimhashMatrixCompressor
3 - DLMatrixCompressor
4 - KmeansMatrixCompressor
8 - KmeansAndPruningMatrixCompressor
9 - InputOutputCompressor
"""
from __future__ import absolute_import
from absl import logging
from graph_compression.compression_lib import compression_op as comp_op
from graph_compression.compression_lib import dl_compression_op
from graph_compression.compression_lib import simhash_compression_op as simhash_comp_op
_COMPRESSION_OPTIONS = [1, 2, 3, 4, 8, 9]
def get_apply_compression(compression_op_spec, global_step):
"""Returns apply_compression operation matching compression_option input."""
compressor_spec = comp_op.LowRankDecompMatrixCompressor.get_default_hparams()
  if 'rank' in compression_op_spec:
    compressor_spec.set_hparam('rank', compression_op_spec.rank)
  if 'block_size' in compression_op_spec:
    compressor_spec.set_hparam('block_size', compression_op_spec.block_size)
logging.info('Compressor spec %s', compressor_spec.to_json())
logging.info('Compression operator spec %s', compression_op_spec.to_json())
if compression_op_spec.compression_option not in _COMPRESSION_OPTIONS:
logging.info(
'Compression_option %s not in expected options: %s. '
'Will use low_rank decomp by default.',
str(compression_op_spec.compression_option),
','.join([str(opt) for opt in _COMPRESSION_OPTIONS]))
compression_op_spec.compression_option = 1
apply_compression = None
if compression_op_spec.compression_option == 1:
compressor = comp_op.LowRankDecompMatrixCompressor(spec=compressor_spec)
apply_compression = comp_op.ApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 2:
compressor_spec.set_hparam('is_b_matrix_trainable', False)
compressor = simhash_comp_op.SimhashMatrixCompressor(spec=compressor_spec)
apply_compression = simhash_comp_op.SimhashApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 3:
compressor_spec.set_hparam('is_b_matrix_trainable', False)
compressor_spec.set_hparam('use_lsh', True)
compressor = dl_compression_op.DLMatrixCompressor(spec=compressor_spec)
compression_op_spec.set_hparam('use_tpu', False)
apply_compression = dl_compression_op.DLApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 4:
compressor_spec.set_hparam('is_b_matrix_trainable', True)
compressor = simhash_comp_op.KmeansMatrixCompressor(spec=compressor_spec)
apply_compression = simhash_comp_op.SimhashApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 8:
compressor_spec.set_hparam('is_b_matrix_trainable', True)
compressor = simhash_comp_op.KmeansMatrixCompressor(spec=compressor_spec)
apply_compression = simhash_comp_op.SimhashApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 9:
compressor_spec.set_hparam('is_b_matrix_trainable', True)
compressor_spec.set_hparam('is_c_matrix_trainable', True)
compressor_spec.set_hparam('is_d_matrix_trainable', True)
compressor = comp_op.LowRankDecompMatrixCompressor(spec=compressor_spec)
apply_compression = comp_op.ApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
return apply_compression
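# Minimal usage sketch (hedged): `op_spec` is assumed to be a compression-op hparams object of
# the kind this function expects (it must support set_hparam, as used above), and `step` the
# training global-step tensor; these names are illustrative, not part of the library's documented API.
#
#   op_spec.set_hparam('compression_option', 4)  # 4 -> KmeansMatrixCompressor
#   apply_comp = get_apply_compression(op_spec, global_step=step)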
|
the-stack_106_30200 | from sqlalchemy import Column, Integer, ForeignKey, PrimaryKeyConstraint
import settings
from src.models.company import Company
from src.models.user import User
from src.services.email import EmailService
from src.utils.validators import validate_company_assignment
from src.utils.exceptions import Conflict, HTTPException
from src.adapters.user_company import UserCompanyAdapter
from sqlalchemy.exc import IntegrityError
from src.models.base import Base
class UserCompany(Base, UserCompanyAdapter):
__tablename__ = 'user_company'
__table_args__ = (PrimaryKeyConstraint('user_id', "company_id"), )
user_id = Column(Integer, ForeignKey("user.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
company_id = Column(Integer, ForeignKey("company.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
@classmethod
def get_users(cls, context, company_id):
results = context.query(cls, User).join(User, cls.user_id == User.id).filter(cls.company_id == company_id).all()
return cls.to_json(results)
@classmethod
def add_user(cls, context, company_id, user_id):
user_company = UserCompany()
user_company.company_id = company_id
user_company.user_id = user_id
try:
context.add(user_company)
context.commit()
except IntegrityError:
context.rollback()
raise HTTPException("This user it's already associated with this company", status=400)
user = User.get_user_by_id(context, user_id)
company = Company.get_company_by_id(context, company_id)
email_service = EmailService(api_key=settings.SENDGRID_API_KEY, sender=settings.EMAIL_ADDRESS)
email_service.send_assignment_email(user, company)
@classmethod
def delete_user(cls, context, company_id, user_id):
result = context.query(cls).filter_by(user_id=user_id, company_id=company_id).first()
if not result:
raise HTTPException("The resource you are trying to delete does not exists", status=404)
context.delete(result)
context.commit()
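# Usage sketch (hedged): `session` is assumed to be a SQLAlchemy Session bound to the
# application's engine; the ids are illustrative.
#
#   UserCompany.add_user(session, company_id=1, user_id=42)   # also sends the assignment email
#   users = UserCompany.get_users(session, company_id=1)
#   UserCompany.delete_user(session, company_id=1, user_id=42)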
|
the-stack_106_30201 | # Be sure to run this file from the "region_of_acquisition" folder
# cd examples/region_of_acquisition
#
import yaml
import time
import os
import sys
import copy
import pickle
import pybullet as p
import numpy as np
import pandas as pd
import pathos.multiprocessing as mp
from functools import partial
from itertools import repeat
import itertools
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from matplotlib import rcParams
import inspect
from natsort import natsorted
from somo.sweep import iter_utils
# from utils import label_functions # Need to fix label functions pipeline to come from example folder
def starmap_with_kwargs(pool, fn, args_iter, kwargs_iter):
args_for_starmap = zip(repeat(fn), args_iter, kwargs_iter)
return pool.starmap(apply_args_and_kwargs, args_for_starmap)
def apply_args_and_kwargs(fn, args, kwargs):
return fn(*args, **kwargs)
def process_run(config_file, label_function=None, kwargs={}):
"""Process one dataset from a config file using the prescribed label function"""
global_scale = kwargs.get("global_scale")
label_functions_pkg = kwargs.get("label_functions_pkg")
data_filenames = kwargs.get("data_filenames")
plot_raw = kwargs.get("plot_raw", False)
cycle_key = kwargs.get("cycle_key", None)
# Get data folder
config = iter_utils.load_yaml(config_file)
# Get the global scale
global_scale = float(config.get("setup", {}).get("global_scale", 1.0))
if global_scale is not None:
global_scale = global_scale
global_scale_inv = 1.0 / global_scale
# Set up the label functions
if label_function is None:
process = config.get("setup", {})
label_funs_to_get = process.get("label_functions", "default")
else:
label_funs_to_get = label_function
default_fun = getattr(label_functions_pkg, "default")
default_args = inspect.getfullargspec(default_fun)[0]
if isinstance(label_funs_to_get, str):
label_funs_to_get = [label_funs_to_get]
if isinstance(label_funs_to_get, list):
methods_to_call = dict()
for curr_name in label_funs_to_get:
curr_fun = getattr(label_functions_pkg, curr_name, None)
if curr_fun is not None:
args = inspect.getfullargspec(curr_fun)[0]
methods_to_call[curr_name] = {"function": curr_fun, "args": args}
if methods_to_call:
label_function = methods_to_call
else:
label_function = {
"default": {"function": default_fun, "args": default_args}
}
else:
label_function = {"default": {"function": default_fun, "args": default_args}}
# Get the names of the args specified in the label function
label_fun_inputs = []
for label_fun_key in label_function:
label_fun_inputs.extend(label_function[label_fun_key]["args"])
label_fun_inputs = list(set(label_fun_inputs))
# Get file locations
folder = iter_utils.get_group_folder(config)
print(folder)
success_filename = os.path.join(folder, "summary.yaml")
# Unzip the sweep
sweep = config["sweep"]
sweep_vars = []
sweep_labels = []
sweep_values = []
sweep_diffs = []
sweep_lookup = []
for param in sweep:
sweep_vars.append(iter_utils.parse_variable_name(param["variable"]))
sweep_values.append([])
if param.get("max", None) is not None:
if param["num_steps"] > 1:
sweep_diffs.append(
(param["max"] - param["min"]) / (param["num_steps"] - 1)
)
else:
sweep_diffs.append(0.0)
folder_param = param.get("folder", None)
if folder_param is not None:
folder_setup = os.path.join(folder_param, "sweep_values.yaml")
if os.path.isfile(folder_setup):
curr_lookup = iter_utils.load_yaml(folder_setup)
f = {}
f["names"] = [
os.path.join(folder_param, row["name"])
for row in curr_lookup["files"]
]
f["values"] = [row["values"] for row in curr_lookup["files"]]
curr_lookup["files"] = f
sweep_lookup.append(curr_lookup)
sweep_labels.append(curr_lookup["variables"])
else:
sweep_lookup.append(None)
else:
sweep_lookup.append(None)
sweep_labels.append(
iter_utils.parse_variable_name(param.get("label", None))
)
print(sweep_vars)
# Get the list of all folders
run_folders = iter_utils.get_folders(folder)
# Read in each data file and parse it
label_vals = {}
for key in label_function:
label_vals[key] = []
num_finger_segs = []
for curr_folder in run_folders:
print(curr_folder)
param_filename = os.path.join(curr_folder, "params.yaml")
params = iter_utils.load_yaml(param_filename)
for idx, var in enumerate(sweep_vars):
val = iter_utils.get_from_dict(params, var)
if sweep_lookup[idx] is not None:
try:
num_idx = sweep_lookup[idx]["files"]["names"].index(val)
val_use = sweep_lookup[idx]["files"]["values"][num_idx]
except ValueError:
val_use = val
else:
val_use = val
sweep_values[idx].append(val_use)
# Get object position data if needed
if "objectpose" in label_fun_inputs:
fields = [
"timeStamp",
"objectId",
"posX",
"posY",
"posZ",
"oriX",
"oriY",
"oriZ",
"oriW",
]
pose_file = os.path.join(curr_folder, data_filenames["objectpose"])
reader = iter_utils.read_parse_data(pose_file)
df = reader.make_dataframe(fields)
for pos in ["posX", "posY", "posZ"]:
df[pos] = global_scale_inv * df[pos]
# Get euler angles from the quaternions
euler = []
for quaternion in zip(df["oriX"], df["oriY"], df["oriZ"], df["oriW"]):
# print(quaternion)
euler.append(p.getEulerFromQuaternion(quaternion))
euler = np.array(euler)
euler = np.unwrap(euler, axis=0)
euler = np.rad2deg(euler)
df["eulerX"] = euler[:, 0]
df["eulerY"] = euler[:, 1]
df["eulerZ"] = -euler[:, 2]
df_rel = df - df.iloc[0].values.squeeze()
if plot_raw and cycle_key is not None:
act_file = os.path.join(curr_folder, data_filenames["actuation"])
iter_utils.graph_data(
df_rel,
filename=pose_file,
cyc_filename=act_file,
cyclic_key=cycle_key,
)
# iter_utils.graph_cyclic(df, act_file, cycle_key)
else:
df = None
# Get contact data if needed
if "contact" in label_fun_inputs:
filename_contact = os.path.join(curr_folder, data_filenames["contact"])
if os.path.exists(filename_contact):
fields = [
"timeStamp",
"stepCount",
"bodyUniqueIdA",
"bodyUniqueIdB",
"linkIndexA",
"linkIndexB",
]
reader = iter_utils.read_parse_data(filename_contact)
df_contact = reader.make_dataframe(fields)
else:
df_contact = None
else:
df_contact = None
# Get actuation data if needed
if "actuation" in label_fun_inputs:
filename_actuation = os.path.join(curr_folder, data_filenames["actuation"])
if os.path.exists(filename_actuation):
df_actuation = pd.read_pickle(filename_actuation)
for col in df_actuation.columns.values:
if "actuation" in col:
df_actuation[col] = pow(global_scale_inv, 2) * df_actuation[col]
else:
df_actuation = None
else:
df_actuation = None
# Get the number of finger segments
calc_file = os.path.join(curr_folder, data_filenames["calculated"])
calc_params = iter_utils.load_yaml(calc_file)
num_finger_segs.append(calc_params.get("num_finger_segs", []))
# package the correct data to give to the label function
label_fun_send_list = {
"objectpose": df,
"contact": df_contact,
"actuation": df_actuation,
}
# Get the labels from the label functions
for label_fun_key in label_function:
label_fun_send = dict()
for key in label_function[label_fun_key]["args"]:
label_fun_send[key] = label_fun_send_list[key]
curr_val = label_function[label_fun_key]["function"](**label_fun_send)
label_vals[label_fun_key].append(curr_val)
if "save_raw_data" in label_function.keys():
out = {}
if df is not None:
out["objectpose"] = df.to_dict(orient="list")
if df_contact is not None:
out["contact"] = df_contact.to_dict(orient="list")
if df_actuation is not None:
out["actuation"] = df_actuation.to_dict(orient="list")
out_file = os.path.join(curr_folder, "raw_data.pkl")
with open(out_file, "wb") as f:
pickle.dump(out, f)
results = dict()
results["labels"] = label_vals
results["vars"] = sweep_vars
results["varlabels"] = sweep_labels
results["sweep"] = sweep_values
results["diffs"] = sweep_diffs
results["num_finger_segs"] = num_finger_segs
iter_utils.save_yaml(results, success_filename)
data = flatten_data(results)
filename, ext = os.path.splitext(success_filename)
iter_utils.save_yaml(data, filename + "_flattened" + ext)
return results
def flatten_dict(dd, separator="_", prefix=""):
return (
{
prefix + separator + k if prefix else k: v
for kk, vv in dd.items()
for k, v in flatten_dict(vv, separator, kk).items()
}
if isinstance(dd, dict)
else {prefix: dd}
)
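# Worked example (illustrative values): flatten_dict({"a": {"b": 1}, "c": 2})
# returns {"a_b": 1, "c": 2} -- nested keys are joined with the separator.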
def flatten_data(results):
# Flatten data in labels
labels_in_graph = list(results["labels"].keys())
for label_name in labels_in_graph:
new_label_keys = []
data_in = results["labels"][label_name]
if isinstance(data_in[0], dict):
for idx, row in enumerate(data_in):
new_row = flatten_dict(row)
for key in new_row:
new_key = label_name + "_" + key
if not results["labels"].get(new_key, False):
results["labels"][new_key] = []
new_label_keys.append(new_key)
results["labels"][new_key].append(new_row[key])
del results["labels"][label_name]
labels_in_graph.extend(new_label_keys)
labels_in_graph.remove(label_name)
df = pd.DataFrame(results["labels"])
# Add in variable values.
varlabels = results["varlabels"]
print(varlabels)
for idx, vals in enumerate(results["sweep"]):
# If there are other dimensions added, add them to the dataframe
df.loc[:, varlabels[idx]] = vals
data = df.to_dict(orient="list")
return data
class DataLabeler:
def __init__(self, label_functions):
self.label_functions_pkg = label_functions
all_filenames = iter_utils.load_yaml("save_paths.yaml")
self.data_filenames = all_filenames["data"]
self.global_scale = None
def set_global_scale(self, scale):
self.global_scale = float(scale)
def process_all(self, config_file, label_function=None, **kwargs):
"""Process all datasets within a config file"""
kwargs["global_scale"] = copy.deepcopy(self.global_scale)
kwargs["label_functions_pkg"] = self.label_functions_pkg
kwargs["data_filenames"] = copy.deepcopy(self.data_filenames)
config = iter_utils.load_yaml(config_file)
setup = config.get("setup", {})
slices_2d = setup.get("slices_2d", False)
base_folder = iter_utils.get_group_folder(config)
print(base_folder)
if slices_2d:
dir_list = os.listdir(base_folder)
dir_list = natsorted(dir_list)
folders = [
os.path.join(base_folder, subdir)
for subdir in dir_list
if os.path.isdir(os.path.join(base_folder, subdir))
]
# Create a variable summary for folders
num_vars_to_use = len(config["sweep"]) - 2
out = {}
out["vars"] = []
out["sweep"] = []
for idx in range(num_vars_to_use):
if config["sweep"][idx].get("folder", False):
folder_param = config["sweep"][idx].get("folder")
folder_setup = os.path.join(folder_param, "sweep_values.yaml")
folder_config = iter_utils.load_yaml(folder_setup)
out["vars"].extend(folder_config["variables"])
for idx, _ in enumerate(folder_config["variables"]):
values = []
for file_col in folder_config["files"]:
values.append(file_col["values"][idx])
out["sweep"].append(values)
elif config["sweep"][idx].get("values", False):
values = config["sweep"][idx].get("values")
out["vars"].append(config["sweep"][idx].get("label"))
out["sweep"].append(values)
elif config["sweep"][idx].get("max", False):
gs_inv = 1.0 / (self.global_scale)
maxi = config["sweep"][idx].get("max") * gs_inv
mini = config["sweep"][idx].get("min") * gs_inv
steps = config["sweep"][idx].get("num_steps")
values = np.linspace(mini, maxi, steps).tolist()
out["vars"].append(config["sweep"][idx].get("label"))
out["sweep"].append(values)
param_list = itertools.product(*out["sweep"])
all_permutations = []
for var in out["vars"]:
all_permutations.append([])
for item in param_list:
for var_idx in range(len(out["vars"])):
all_permutations[var_idx].append(item[var_idx])
out["sweep"] = all_permutations
iter_utils.save_yaml(out, os.path.join(base_folder, "summary.yaml"))
else:
folders = [""]
# Summarize all the data
if True:
parallel = kwargs.get("parallel", True)
if parallel:
if "num_processes" in kwargs:
num_processes = kwargs.get("num_processes")
else:
num_processes = os.cpu_count()
all_config_files = []
for folder in folders:
all_config_files.append(
os.path.join(base_folder, folder, "config.yaml")
)
pool = mp.Pool(num_processes)
# args_iter = zip(all_config_files,repeat(label_function))
# kwargs_iter = repeat(kwargs)
process_run_simple = partial(
process_run, label_function=label_function, kwargs=kwargs
)
pool.map(process_run_simple, all_config_files)
else:
for folder in folders:
new_config_file = os.path.join(base_folder, folder, "config.yaml")
print(new_config_file)
process_run(new_config_file, label_function, kwargs)
if __name__ == "__main__":
config_file = "sweeps/grid_design_grasps.yaml"
labeler = DataLabeler()
labeler.process_all(config_file)
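# Rough shape of the sweep config this script reads (hedged sketch: the field names come from
# the keys accessed above; the values and the variable path are purely illustrative):
#
#   setup:
#     global_scale: 10
#     label_functions: default
#     slices_2d: false
#   sweep:
#     - variable: manipulator/max_force   # illustrative path
#       label: max_force
#       min: 0
#       max: 100
#       num_steps: 5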
|
the-stack_106_30202 | import http.client
from os import getenv
# import dotenv
from flask import json
# dotenv.load_dotenv()
# api_key = getenv('API_KEY')
class MainWallet():
def __init__(self):
self.key = ""
def initialize_wallet(self):
wallet_data = create_wallet(self.key)
print("Wallet-data: ", wallet_data)
self.address = wallet_data['address']
self.secret = wallet_data['secret']
main_account = create_virtual_currency(self.key)
if 'errorCode' in main_account.keys():
main_account = get_virtual_currency(self.key)
self.main_account_id = main_account['accountId']
else:
self.main_account_id = main_account['id']
print("Main-account-data: ", main_account)
def create_wallet(api_key):
conn = http.client.HTTPSConnection("api-eu1.tatum.io")
headers = { 'x-api-key': api_key}
conn.request("GET", "/v3/algorand/wallet", headers=headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def create_virtual_currency(api_key):
conn = http.client.HTTPSConnection("api-eu1.tatum.io")
payload = "{\"name\":\"VC_ZAR\",\"supply\":\"1000000000\",\"basePair\":\"ZAR\",\"baseRate\":1,\"customer\":{\"accountingCurrency\":\"ZAR\",\"customerCountry\":\"SA\",\"externalId\":\"123654\",\"providerCountry\":\"SA\"},\"description\":\"Mbongo Virtual Currency.\",\"accountCode\":\"Main_Account\",\"accountNumber\":\"1234567890\",\"accountingCurrency\":\"ZAR\"}"
headers = {
'content-type': "application/json",
'x-api-key': api_key
}
conn.request("POST", "/v3/ledger/virtualCurrency", payload, headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def get_virtual_currency(api_key):
conn = http.client.HTTPSConnection("api-eu1.tatum.io")
headers = { 'x-api-key': api_key }
conn.request("GET", "/v3/ledger/virtualCurrency/VC_ZAR", headers=headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8")) |
the-stack_106_30204 | """
Tests for application.
"""
import responses
def test_get_info(helpers, fb_api):
with responses.RequestsMock() as m:
m.add(
method=responses.GET,
url=f"https://graph.facebook.com/{fb_api.version}/{fb_api.app_id}",
json=helpers.load_json(
"testdata/facebook/apidata/applications/application_info.json"
),
)
app = fb_api.application.get_info()
assert app.id == "123456789"
app_json = fb_api.application.get_info(
fields="id,category,description,link,name,namespace", return_json=True
)
assert app_json["id"] == "123456789"
def test_get_accounts(helpers, fb_api):
with responses.RequestsMock() as m:
m.add(
method=responses.GET,
url=f"https://graph.facebook.com/{fb_api.version}/{fb_api.app_id}/accounts",
json=helpers.load_json(
"testdata/facebook/apidata/applications/application_accounts.json",
),
)
accounts = fb_api.application.get_accounts(count=None)
assert len(accounts.data) == 4
accounts_json = fb_api.application.get_accounts(
fields="id,login_url", count=3, limit=4, return_json=True
)
assert accounts_json["data"][0]["id"] == "123456789"
|
the-stack_106_30205 | import collections
c = collections.Counter('extremely')
c['z'] = 0
print(c)
# The elements() method returns an iterator that yields each element
# of the Counter as many times as its count (elements with a count <= 0 are skipped).
print(list(c.elements()))
"""
output:
Counter({'e': 3, 'x': 1, 't': 1, 'r': 1, 'm': 1, 'l': 1, 'y': 1,
'z': 0})
['e', 'e', 'e', 'x', 't', 'r', 'm', 'l', 'y']
""" |
the-stack_106_30206 | import sys
import time
from datetime import datetime
from pynng import Req0, Rep0, Timeout
# print(str(datetime.now()))
def node0(url: str):
with Rep0(listen=url, recv_timeout=100) as sock:
while True:
try:
msg = sock.recv()
if str(msg.decode()) == 'DATE':
print(f'NODE0: RECEIVED DATE REQUEST')
data = str(datetime.now())
print(f'NODE0: SENDING DATE {data}')
sock.send(data.encode())
except Timeout:
pass
time.sleep(0.5)
def node1(url: str):
with Req0(dial=url) as sock:
print(f'NODE1: SENDING DATE REQUEST')
sock.send('DATE'.encode())
msg = sock.recv()
print(f'NODE1: RECEIVED DATE {msg.decode()}')
if __name__ == "__main__":
# print(sys.argv)
if len(sys.argv) > 2 and sys.argv[1] == 'node0':
node0(sys.argv[2])
elif len(sys.argv) > 2 and sys.argv[1] == 'node1':
node1(sys.argv[2])
else:
print(f"Usage: {sys.argv[0]} node0|node1 <URL> ...")
sys.exit(1)
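# Example invocation (hedged; the script name and URL are illustrative), in two terminals:
#   python reqrep.py node0 tcp://127.0.0.1:13131
#   python reqrep.py node1 tcp://127.0.0.1:13131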
|
the-stack_106_30208 | # -*- coding: UTF-8 -*-
import base64
import json
import sys
from datetime import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from QcloudApi.qcloudapi import QcloudApi
# Paths to the config file, the temp log, and the certificate folder
config_file_path = sys.path[0] + '/config.json'
tmp_file_path = sys.path[0] + '/temp.log'
cert_file_folder = sys.path[0] + '/cert/'
# Generic helper for reading a file
def try_to_open_file(file_path, exit_type):
try:
open_file = open(file_path, 'r')
except Exception as e:
if exit_type == 0:
return False, e
else:
print(e)
sys.exit(0)
else:
read_file = open_file.read()
open_file.close()
return True, read_file
# Load the config file
config_json = json.loads(try_to_open_file(config_file_path, 1)[1])
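# Expected shape of config.json (hedged sketch: the key names come from how they are read
# below; the values shown are placeholders, not verified Tencent CDN API values):
# {
#   "www.example.com": {
#     "validity": 30,
#     "cert_filename": "",
#     "key_filename": "",
#     "secret_id": "<TENCENT_SECRET_ID>",
#     "secret_key": "<TENCENT_SECRET_KEY>",
#     "https_type": 2,
#     "https_force_switch": 1,
#     "http2": 1
#   }
# }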
# Look up the certificate's validity period (start and expiry times)
# and all domains contained in the certificate
def check_cert_info(cert_file_path):
crt_open = open(cert_file_path, 'r')
crt_content = crt_open.read()
crt_open.close()
crt = x509.load_pem_x509_certificate(crt_content.encode(), default_backend())
    # Get the certificate serial number
crt_serial_number = crt.serial_number
    # Get the subject alternative names
crt_altname = crt.extensions.get_extension_for_oid(x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
crt_altname = crt_altname.value.get_values_for_type(x509.DNSName)
    # Get the not-valid-before (start) time
crt_not_valid_before = crt.not_valid_before
    # Get the not-valid-after (expiry) time
crt_not_valid_after = crt.not_valid_after
crt_output = dict()
crt_output['crt_altname'] = crt_altname
crt_output['crt_not_valid_before'] = crt_not_valid_before
crt_output['crt_not_valid_after'] = crt_not_valid_after
crt_output['crt_serial_number'] = crt_serial_number
return crt_output
def crt_chk_alt_name(domain, alt_name):
for i in alt_name:
if i[0] == "*":
i = i[2:]
domain = domain.replace(i, '')
if domain.count('.') == 1:
return True
else:
pass
else:
if i == domain:
return True
else:
pass
return False
# Check whether the certificate has taken effect yet
# Check whether the certificate has expired
# Check whether the target domain is covered by this certificate
def format_cert_key(domain_name, crt_file_name, key_file_name, crt_not_valid_before, crt_not_valid_after):
crt_file_path = cert_file_folder + crt_file_name
key_file_path = cert_file_folder + key_file_name
datetime_now = datetime.utcnow()
    # True if the current time is past the certificate's start time
if datetime_now >= crt_not_valid_before:
        # True if (expiry time - now) is at least the 'validity' value (in days) from the config
if (crt_not_valid_after - datetime_now).days >= int(config_json[domain_name]['validity']):
crt_content = try_to_open_file(crt_file_path, 1)
crt_base64 = base64.encodebytes(crt_content[1].encode()).decode()
key_content = try_to_open_file(key_file_path, 1)
key_base64 = base64.encodebytes(key_content[1].encode()).decode()
output_dict = dict()
output_dict['crt'] = crt_base64
output_dict['key'] = key_base64
return output_dict
else:
return False
else:
return False
def get_cdn_domain(config):
    # Tencent Cloud basic settings
action = 'DescribeCdnHosts'
module = 'cdn'
params = {
'detail': 0
}
    # Call the API
service = QcloudApi(module, config)
    # The encoded URL can be printed, mainly for logging, or generated and run manually;
    # automated use generally does not need this
# print(service.generateUrl(action, params))
    # Execute the API call
qcloud_output = service.call(action, params).decode()
qcloud_output_json = json.loads(qcloud_output)
cdn_host_list = []
for i in qcloud_output_json['data']['hosts']:
cdn_host_list.append(i['host'])
return cdn_host_list
def write_temp_file(qcloud_output, domain, crt_serial_number):
qcloud_output_json = json.loads(qcloud_output)
if qcloud_output_json['code'] == 0:
o_file = try_to_open_file(tmp_file_path, 0)
if o_file[0]:
content_dict = json.loads(o_file[1])
else:
content_dict = dict()
o_file = open(tmp_file_path, 'w')
content_dict[domain] = crt_serial_number
content_dict = json.dumps(content_dict)
o_file.write(str(content_dict))
o_file.close()
else:
pass
def main_run():
    # Iterate over the config entries
for key, value in config_json.items():
        # If cert_filename is empty, default the certificate file name to [domain_name].crt;
        # otherwise use the file name from the config
if value['cert_filename'] == "":
cert_filename = key + '.crt'
else:
cert_filename = value['cert_filename']
        # If key_filename is empty, default the key file name to [domain_name].key;
        # otherwise use the file name from the config
if value['key_filename'] == "":
key_filename = key + '.key'
else:
key_filename = value['key_filename']
check_cert_info_dict = check_cert_info(cert_file_folder + cert_filename)
open_temp_file = try_to_open_file(tmp_file_path, 0)
if open_temp_file[0]:
open_temp_file = json.loads(open_temp_file[1])
if key in open_temp_file:
if open_temp_file[key] == check_cert_info_dict['crt_serial_number']:
continue
else:
pass
else:
pass
else:
pass
if crt_chk_alt_name(key, check_cert_info_dict['crt_altname']):
pass
else:
continue
crt_not_valid_before = check_cert_info_dict['crt_not_valid_before']
crt_not_valid_after = check_cert_info_dict['crt_not_valid_after']
        # Check whether the certificate meets the requirements
crt_key_dict = format_cert_key(key, cert_filename, key_filename, crt_not_valid_before, crt_not_valid_after)
        # If it does not, skip to the next iteration
if crt_key_dict:
            # Tencent Cloud basic settings
config = {
'secretId': value['secret_id'],
'secretKey': value['secret_key'],
}
cdn_domain_list = get_cdn_domain(config)
if key in cdn_domain_list:
action = 'SetHttpsInfo'
module = 'cdn'
params = {
'host': key,
'httpsType': value['https_type'],
'forceSwitch': value['https_force_switch'],
'http2': value['http2'],
'cert': crt_key_dict['crt'],
'privateKey': crt_key_dict['key']
}
                # Call the API
service = QcloudApi(module, config)
                # The encoded URL can be printed, mainly for logging, or generated and run manually;
                # automated use generally does not need this
# print(service.generateUrl(action, params))
                # Execute the API call
qcloud_output = service.call(action, params).decode()
print(qcloud_output)
write_temp_file(qcloud_output, key, check_cert_info_dict['crt_serial_number'])
else:
continue
else:
continue
main_run()
|
the-stack_106_30211 | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import sys
import pytest
from torch.utils.data import DataLoader
from composer import Callback, Event, State, Trainer
from composer.loggers import FileLogger, FileLoggerHparams, Logger, LoggerDestination, LogLevel
from composer.utils.collect_env import disable_env_report
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
class FileArtifactLoggerTracker(LoggerDestination):
def __init__(self) -> None:
self.logged_artifacts = []
def log_file_artifact(self, state: State, log_level: LogLevel, artifact_name: str, file_path: pathlib.Path, *,
overwrite: bool):
del state, overwrite # unused
self.logged_artifacts.append((log_level, artifact_name, file_path))
@pytest.mark.parametrize("log_level", [LogLevel.EPOCH, LogLevel.BATCH])
@pytest.mark.timeout(10)
def test_file_logger(dummy_state: State, log_level: LogLevel, tmpdir: pathlib.Path):
log_file_name = os.path.join(tmpdir, "output.log")
log_destination = FileLoggerHparams(
log_interval=3,
log_level=log_level,
filename=log_file_name,
artifact_name="{run_name}/rank{rank}.log",
buffer_size=1,
flush_interval=1,
).initialize_object()
file_tracker_destination = FileArtifactLoggerTracker()
logger = Logger(dummy_state, destinations=[log_destination, file_tracker_destination])
log_destination.run_event(Event.INIT, dummy_state, logger)
log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
dummy_state.timestamp = dummy_state.timestamp.to_next_epoch()
log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
logger.data_fit({"metric": "fit"}) # should print
logger.data_epoch({"metric": "epoch"}) # should print on batch level, since epoch calls are always printed
logger.data_batch({"metric": "batch"}) # should print on batch level, since we print every 3 steps
dummy_state.timestamp = dummy_state.timestamp.to_next_epoch()
log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
logger.data_epoch({"metric": "epoch1"}) # should print, since we log every 3 epochs
dummy_state.timestamp = dummy_state.timestamp.to_next_epoch()
log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
logger.data_epoch({"metric": "epoch2"}) # should print on batch level, since epoch calls are always printed
logger.data_batch({"metric": "batch1"}) # should NOT print
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
dummy_state.timestamp = dummy_state.timestamp.to_next_epoch()
log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
log_destination.close(dummy_state, logger)
with open(log_file_name, 'r') as f:
if log_level == LogLevel.EPOCH:
assert f.readlines() == [
'[FIT][batch=2]: { "metric": "fit", }\n',
'[EPOCH][batch=2]: { "metric": "epoch1", }\n',
]
else:
assert log_level == LogLevel.BATCH
assert f.readlines() == [
'[FIT][batch=2]: { "metric": "fit", }\n',
'[EPOCH][batch=2]: { "metric": "epoch", }\n',
'[BATCH][batch=2]: { "metric": "batch", }\n',
'[EPOCH][batch=2]: { "metric": "epoch1", }\n',
'[EPOCH][batch=3]: { "metric": "epoch2", }\n',
]
# Flush interval is 1, so there should be one log_file call per LogLevel
# Flushes also happen per each eval_start, epoch_start, and close()
# If the loglevel is batch, flushing also happens every epoch end
if log_level == LogLevel.EPOCH:
#
assert len(file_tracker_destination.logged_artifacts) == int(dummy_state.timestamp.epoch) + int(
dummy_state.timestamp.epoch) + 1
else:
assert log_level == LogLevel.BATCH
assert len(file_tracker_destination.logged_artifacts) == int(dummy_state.timestamp.batch) + int(
dummy_state.timestamp.epoch) + int(dummy_state.timestamp.epoch) + 1
@pytest.mark.timeout(15) # disk can be slow on Jenkins
def test_file_logger_capture_stdout_stderr(dummy_state: State, tmpdir: pathlib.Path):
log_file_name = os.path.join(tmpdir, "output.log")
log_destination = FileLoggerHparams(filename=log_file_name,
buffer_size=1,
flush_interval=1,
capture_stderr=True,
capture_stdout=True).initialize_object()
# capturing should start immediately
print("Hello, stdout!\nExtra Line")
print("Hello, stderr!\nExtra Line2", file=sys.stderr)
logger = Logger(dummy_state, destinations=[log_destination])
log_destination.run_event(Event.INIT, dummy_state, logger)
log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
log_destination.close(dummy_state, logger)
with open(log_file_name, 'r') as f:
assert f.readlines() == [
'[stdout]: Hello, stdout!\n',
'[stdout]: Extra Line\n',
'[stderr]: Hello, stderr!\n',
'[stderr]: Extra Line2\n',
]
class ExceptionRaisingCallback(Callback):
def fit_start(self, state: State, logger: Logger) -> None:
del state, logger # unused
raise RuntimeError("My Exception!")
def test_exceptions_are_printed(tmpdir: pathlib.Path):
# Test that exceptions are printed to stderr, which is captured by the file logger
# The file logger stops capturing stdout/stderr when it is closed
# Here, we construct a trainer that raises an exception on Event.FIT_START
# and assert that the exception is written to the logfile
exception_raising_callback = ExceptionRaisingCallback()
logfile_name = str(tmpdir / "logfile.txt")
file_logger = FileLogger(filename=logfile_name, capture_stderr=True)
dataloader = DataLoader(RandomClassificationDataset())
model = SimpleModel()
trainer = Trainer(model=model,
train_dataloader=dataloader,
max_duration=1,
callbacks=[exception_raising_callback],
loggers=[file_logger])
disable_env_report() # Printing the full report in this test can cause timeouts
# manually calling `sys.excepthook` for the exception, as it is impossible to write a test
# that validates unhandled exceptions are logged, since the test validation code would by definition
# need to handle the exception!
try:
trainer.fit()
except RuntimeError:
exc_type, exc_value, tb = sys.exc_info()
assert exc_type is not None
assert exc_value is not None
assert tb is not None
sys.excepthook(exc_type, exc_value, tb)
trainer.close()
with open(logfile_name, "r") as f:
log_lines = f.readlines()
assert "[stderr]: RuntimeError: My Exception!\n" == log_lines[-1]
# Since the trainer was closed, future prints should not appear in the file logger
print("SHOULD NOT BE CAPTURED")
with open(logfile_name, "r") as f:
logfile = f.read()
assert "SHOULD NOT BE CAPTURED" not in logfile
|
the-stack_106_30215 | import functools
import math
import operator
import random
import re
import sys
import error
def isdigit(string):
return all(i in '1234567890-.' for i in string)
def eval_(string):
if '.' in string: return float(string)
try: return int(string)
except: return string
class Stack(list):
def push(self, *values):
for v in values:
try: self.append(v.replace("'",'"'))
except: self.append(v)
def pop(self, index=-1):
return super().pop(index)
def peek(self, index=-1):
return self[index]
def swap(self):
self[-1], self[-2] = self[-2], self[-1]
def add(x,y):
return y + x
def subtract(x,y):
return y - x
def multiply(x,y):
return y * x
def divide(x,y):
return y / x
def exponent(x,y):
return y ** x
def modulo(x,y):
return y % x
def isprime(x):
for i in range(2,x):
if x%i == 0:
return False
return x > 1 and isinstance(x, int)
class Null:
def __init__(self, value):
self.value = value
class StackScript:
def __init__(self, code, args, funcs, stack, line, outer):
self.args = args
self.register = args if args else 0
self.stacks = [stack]
self.index = 0
self.code = StackScript.tokenize(code + ' ')
self.prevcall = None
self.functions = funcs
cont = False
outer = outer.split('\n')
for i, cmd in enumerate(self.code):
while True:
try:
self.stack = self.stacks[self.index]
break
except:
self.stacks.append(Stack())
if cont:
cont -= 1
continue
if cmd[0] == '"':
self.stack.push(cmd[1:])
elif cmd[0] == '{' and cmd[-1] == '}':
if cmd[1] == ',':
argcount = abs(int(self.stack.pop()))
sslice = 2
else:
argcount = 0
sslice = 1
try:
func = self.functions[cmd[sslice:-1]]
except:
raise error.UnableToRetrieveFunctionError(line, outer[line-1], cmd[1:-1])
feed = []
if func.lamb:
feed.extend(list(self.stack))
self.stack.clear()
else:
while len(feed) < (argcount or func.args):
feed.append(self.stack.pop())
feed = feed[::-1]
self.prevcall = func(*feed, funccall = True)
self.stack.push(self.prevcall)
elif isdigit(cmd):
self.stack.push(eval_(cmd))
else:
cmd = cmd.strip()
if not cmd:
continue
if cmd == 'Q':
if self.stack.pop():
cont = -1
continue
try:
result = self.COMMANDS[cmd]()
except TypeError:
raise error.IncongruentTypesError(line, outer[line-1], cmd)
except:
raise error.EmptyStackError(line, outer[line-1])
if result == Null:
raise error.InvalidSymbolError(line, outer[line-1], cmd)
if type(result) == Stack:
self.stacks[self.index] = result
del result
@staticmethod
def tokenize(text):
final = []
stemp = ''
ctemp = ''
num = ''
instr = False
incall = False
text = text.replace('{', ' {')
for i, char in enumerate(text):
if char == '"': instr = not instr
if char == '{': incall = True
if char == '}': incall = False; ctemp += '}'; continue
if instr: stemp += char
elif incall:ctemp += char
else:
if stemp:
final.append(stemp)
stemp = ''
if ctemp:
final.append(ctemp)
ctemp = ''
if isdigit(char):
try:
if char == '-':
                        if text[i+1].isdigit():
num += char
else:
num += char
except: final.append(char)
else:
if num:
final.append(num)
num = ''
final.append(char)
if stemp: final.append(stemp)
if ctemp: final.append(ctemp)
if num: final.append(num)
tokens = []
for i, f in enumerate(final):
if f in 'Bb':
tokens.append(f + final.pop(i + 1))
elif f in '" ':
pass
else:
tokens.append(f)
return tokens
@property
def COMMANDS(self):
return {
'!': lambda: self.stack.push(not self.stack.pop()),
'#': lambda: self.stack.sort(),
'$': lambda: self.stack.swap(),
'%': lambda: self.stack.push(modulo(self.stack.pop(), self.stack.pop())),
'&': lambda: self.stack.push(self.stack.pop() and self.stack.pop()),
"'": lambda: self.stack.push(self.stack.pop() * 2),
'(': lambda: self.decrement(),
')': lambda: self.increment(),
'*': lambda: self.stack.push(multiply(self.stack.pop(), self.stack.pop())),
'+': lambda: self.stack.push(add(self.stack.pop(), self.stack.pop())),
'/': lambda: self.stack.push(divide(self.stack.pop(), self.stack.pop())),
':': lambda: Null,
'<': lambda: self.stack.push(self.stack.pop() < self.stack.pop()),
'=': lambda: self.stack.push(self.stack.pop() == self.stack.pop()),
'>': lambda: self.stack.push(self.stack.pop() > self.stack.pop()),
'?': lambda: self.stack.push(bool(self.stack.pop())),
'@': lambda: self.stack.reverse(),
'A': lambda: self.stack.push(*self.args),
'B': lambda: self.stack.push(self.stack[:self.stack.pop()]),
'C': lambda: self.stack.push(chr(self.stack.pop())),
'D': lambda: self.stack.push(self.stack[-self.stack.pop()]),
'E': lambda: self.stack.push(enumerate(self.stack.pop())),
'F': lambda: self.stack.push(*self.factors()),
'G': lambda: self.stack.push(self.register),
'H': lambda: print(''.join(map(str, self.stack))),
'I': lambda: Null,
'J': lambda: self.join(''),
'K': lambda: Null,
'L': lambda: self.stack.push(len(self.stack)),
'M': lambda: self.stack.push(max(self.stack)),
'N': lambda: self.stack.push('\n'.join(map(str, self.stack))),
'O': lambda: self.stack.push(ord(self.stack.pop())),
'P': lambda: self.stack.push(isprime(self.stack.pop())),
'R': lambda: self.stack.push(list(range(1, self.stack.pop()+1))),
'S': lambda: self.stack.push(self.remove_duplicates()),
'T': lambda: Null,
'U': lambda: Null,
'V': lambda: self.store(self.stack.pop()),
'X': lambda: self.stack.push([[self.stack[-1] for _ in range(self.stack.pop())], self.stack.pop()][0]),
'Y': lambda: Null,
'Z': lambda: Null,
'[': lambda: self.stack.push(self.prevcall),
']': lambda: self.run_lambda(self.stack.pop()),
'^': lambda: self.stack.push(exponent(self.stack.pop(), self.stack.pop())),
'_': lambda: self.stack.push(subtract(self.stack.pop(), self.stack.pop())),
'`': lambda: Null,
'a': lambda: self.stack.push(list(self.args)),
'b': lambda: Null,
'c': lambda: self.stack.clear(),
'd': lambda: self.stack.push(self.stack[-1]),
'e': lambda: self.stack.push(self.stack.pop() in self.stack.pop()),
'f': lambda: self.stack.push(*filter(isprime, self.factors())),
'g': lambda: Null,
'h': lambda: print(self.stack),
'i': lambda: self.stack.push(int(self.stack.pop())),
'j': lambda: self.join(str(self.stack.pop())),
'k': lambda: Null,
'l': lambda: Null,
'm': lambda: self.stack.push(min(self.stack)),
'n': lambda: self.join(),
'o': lambda: self.stack.push(self.stack.pop() or self.stack.pop()),
'p': lambda: self.stack.pop(),
'q': lambda: self.stack.push(set(self.stack.pop())),
'r': lambda: self.stack.push(list(range(self.stack.pop(), self.stack.pop()))),
's': lambda: self.stack.push(sum(self.stack)),
't': lambda: Null,
'u': lambda: Null,
'v': lambda: Null,
'w': lambda: Null,
'x': lambda: self.stack.push([self.stack[-1] for _ in range(self.stack.pop())]),
'y': lambda: [self.stack.push(self.stack[-1]) for _ in range(self.stack.pop())],
'z': lambda: Null,
'|': lambda: self.stack.push(abs(self.stack.pop())),
'~': lambda: Null,
'B!':lambda: self.apply(operator.not_),
'B#':lambda: self.apply(sorted),
'B$':lambda: Null,
'B%':lambda: self.apply(lambda l: functools.reduce(operator.mod, l)),
'B&':lambda: self.apply(lambda l: functools.reduce(operator.and_, l)),
"B'":lambda: self.apply(lambda x: 2 * x),
'B(':lambda: self.stack.push(self.stacks[self.index - 1].pop()),
'B)':lambda: self.stack.push(self.stacks[(self.index + 1) % len(self.stacks)].pop()),
'B*':lambda: self.apply(lambda l: functools.reduce(operator.mul, l)),
'B+':lambda: self.apply(lambda l: functools.reduce(operator.add, l)),
'B/':lambda: self.apply(lambda l: functools.reduce(operator.truediv, l)),
'B:':lambda: Null,
'B<':lambda: Null,
'B=':lambda: self.apply(lambda l: self.eq(*l)),
'B>':lambda: Null,
'B?':lambda: Null,
'B@':lambda: self.apply(reversed, True),
'BA':lambda: self.apply(abs),
'BB':lambda: self.stack.push(bin(self.stack.pop())[2:]),
'BC':lambda: self.collect(),
'BD':lambda: self.apply(lambda i: list(map(int, str(i)))),
'BE':lambda: self.apply(lambda i: i in self.stack[-1]),
'BF':lambda: self.flatten(),
'BG':lambda: Null,
'BH':lambda: Null,
'BI':lambda: Null,
'BJ':lambda: self.apply(lambda i: ''.join(map(str, i))),
'BK':lambda: Null,
'BL':lambda: self.apply(len),
'BM':lambda: self.apply(max),
'BN':lambda: Null,
'BO':lambda: Null,
'BP':lambda: self.apply(lambda x: x[1:]),
'BQ':lambda: self.apply(self.remove_duplicates),
'BR':lambda: self.apply(lambda x: list(range(1, x + 1))),
'BS':lambda: Stack([self.stack[i : i+2] for i in range(len(self.stack) - 1)]),
'BT':lambda: Null,
'BU':lambda: Null,
'BV':lambda: Null,
'BW':lambda: Stack([i for i in self.stack[:-1] if i not in self.stack[-1]]),
'BX':lambda: self.stack.push(random.choice(self.stack.pop())),
'BY':lambda: self.apply(random.choice),
'BZ':lambda: Stack(filter(None, self.stack)),
'B]':lambda: self.wrap(),
'B[':lambda: self.apply(lambda l: [l]),
'B^':lambda: self.apply(lambda l: functools.reduce(operator.xor, l)),
'B_':lambda: self.apply(lambda l: functools.reduce(operator.sub, l)),
'B`':lambda: self.apply(lambda l: functools.reduce(operator.pow, l)),
'Ba':lambda: self.stack.push(self.stack.pop() & self.stack.pop()),
'Bb':lambda: self.stack.push(int(self.stack.pop(), self.stack.pop())),
'Bc':lambda: self.columns(),
'Bd':lambda: self.apply(lambda l: functools.reduce(operator.floordiv, l)),
'Be':lambda: self.stack.push([i in self.stack[-1] for i in self.stack.pop()]),
'Bf':lambda: self.stack.push(~self.stack.pop()),
'Bg':lambda: Null,
'Bh':lambda: Null,
'Bi':lambda: self.apply(int),
'Bj':lambda: Null,
'Bk':lambda: Null,
'Bl':lambda: Null,
'Bm':lambda: self.apply(min),
'Bn':lambda: self.apply(lambda i: -i),
'Bo':lambda: self.stack.push(self.stack.pop() | self.stack.pop()),
'Bp':lambda: self.apply(lambda x: x[:-1]),
'Bq':lambda: Null,
'Br':lambda: Null,
'Bs':lambda: self.apply(sum),
'Bt':lambda: Null,
'Bu':lambda: Null,
'Bv':lambda: self.apply(lambda i: int(''.join(map(str, i)))),
'Bw':lambda: Stack([i for i in self.stack[:-1] if i in self.stack[-1]]),
'Bx':lambda: self.stack.push(self.stack.pop() ^ self.stack.pop()),
'By':lambda: Null,
'Bz':lambda: Null,
'B|':lambda: self.apply(lambda l: functools.reduce(operator.or_, l)),
'B~':lambda: self.apply(operator.inv),
'b!':lambda: self.stack.push(list(map(operator.not_, self.stack.pop()))),
'b#':lambda: Null,
'b$':lambda: Null,
'b%':lambda: self.stack.push(functools.reduce(operator.mod, self.stack.pop())),
'b&':lambda: self.stack.push(functools.reduce(operator.and_, self.stack.pop())),
"b'":lambda: self.stack.push([i * 2 for i in self.stack.pop()]),
'b(':lambda: self.stacks[self.index - 1].push(self.stack.pop()),
'b)':lambda: self.stacks[(self.index + 1) % len(self.stacks)].push(self.stack.pop()),
'b*':lambda: self.stack.push(functools.reduce(operator.mul, self.stack.pop())),
'b+':lambda: self.stack.push(functools.reduce(operator.add, self.stack.pop())),
'b/':lambda: self.stack.push(functools.reduce(operator.truediv, self.stack.pop())),
'b:':lambda: Null,
'b<':lambda: Null,
'b=':lambda: self.stack.push(self.eq(*self.stack.pop())),
'b>':lambda: Null,
'b?':lambda: Null,
'b@':lambda: Null,
'bA':lambda: Null,
'bB':lambda: Null,
'bC':lambda: Null,
'bD':lambda: Null,
'bE':lambda: Null,
'bF':lambda: self.stack.push(self.flatten(self.stack.pop())),
'bG':lambda: Null,
'bH':lambda: Null,
'bI':lambda: Null,
'bJ':lambda: Null,
'bK':lambda: Null,
'bL':lambda: self.stack.push(len(self.stack.pop())),
'bM':lambda: self.stack.push(max(self.stack.pop())),
'bN':lambda: Null,
'bO':lambda: Null,
'bP':lambda: Null,
'bQ':lambda: Null,
'bR':lambda: self.stack.push(self.stack.pop()[::-1]),
'bS':lambda: Null,
'bT':lambda: Null,
'bU':lambda: self.stack.push(*self.stack.pop()),
'bV':lambda: Null,
'bW':lambda: Null,
'bX':lambda: Null,
'bY':lambda: Null,
'bZ':lambda: Null,
'b[':lambda: Null,
'b]':lambda: self.stack.push([self.stack.pop()]),
'b^':lambda: self.stack.push(functools.reduce(operator.xor, self.stack.pop())),
'b_':lambda: self.stack.push(functools.reduce(operator.sub, self.stack.pop())),
'b`':lambda: self.stack.push(functools.reduce(operator.pow, self.stack.pop())),
'ba':lambda: Null,
'bb':lambda: Null,
'bc':lambda: Null,
'bd':lambda: self.stack.push(functools.reduce(operator.floordiv, self.stack.pop())),
'be':lambda: Null,
'bf':lambda: Null,
'bg':lambda: Null,
'bh':lambda: Null,
'bi':lambda: Null,
'bj':lambda: Null,
'bk':lambda: Null,
'bl':lambda: Null,
'bm':lambda: self.stack.push(min(self.stack.pop())),
'bn':lambda: Null,
'bo':lambda: Null,
'bp':lambda: Null,
'bq':lambda: Null,
'br':lambda: Null,
'bs':lambda: Null,
'bt':lambda: Null,
'bu':lambda: Null,
'bv':lambda: Null,
'bw':lambda: Null,
'bx':lambda: Null,
'by':lambda: Null,
'bz':lambda: Null,
'b|':lambda: self.stack.push(functools.reduce(operator.or_, self.stack.pop())),
'b~':lambda: self.stack.push(list(map(operator.inv, self.stack.pop()))),
}
def apply(self, func, array = False):
if array:
return Stack(map(lambda v: list(func(v)), self.stack))
return Stack(map(func, self.stack))
def collect(self):
array = []
sub_array = []
for element in self.stack:
if type(element) == list:
if sub_array:
array.append(sub_array)
sub_array = []
array.append(element)
else:
sub_array.append(element)
if sub_array:
array.append(sub_array)
self.stacks[self.index] = Stack(array)
def columns(self):
self.stacks[self.index] = Stack(map(list, zip(*self.stack)))
def decrement(self):
self.index -= 1
def eq(self, *args):
incs = [args[i] == args[i-1] for i in range(1, len(args))]
return all(incs)
def factors(self):
lof = []
x = self.stack.pop()
if type(x) == str:
return list(x)
for i in range(1,int(x)):
if x%i == 0:
lof.append(i)
return lof
def flatten(self):
def flatten_array(array):
flat = []
if type(array) == list:
for item in array:
flat += flatten_array(item)
else:
flat.append(array)
return flat
copy = flatten_array(list(self.stack))
self.stack.clear()
self.stack.push(*copy)
def increment(self):
self.index += 1
def join(self, char='\n'):
newstack = Stack()
newstack.push(char.join(map(str, self.stack)))
self.stacks[self.index] = newstack
def pad_bin(self):
copy = self.stack.copy()
length = max(map(lambda a: len(bin(a)[2:]), copy))
for i in range(len(self.stack)):
self.stacks[self.index][i] = Stack(map(eval_, bin(self.stack[i])[2:].rjust(length, '0')))
def remove(self, even_odd):
self.stacks[self.index] = Stack(filter(lambda x: x%2 == int(bool(even_odd)), self.stack))
def remove_duplicates(self, array=None):
final = []
if array is None: array = self.stack
for s in array:
if s not in final:
final.append(s)
return final
def run(self,flag,text):
ret = self.stacks[self.index]
if flag:
return ret
if text:
return ''.join(list(map(StackScript.stringify, ret)))
return ret.pop()
def run_lambda(self, index):
lamb = self.functions['lambda {}'.format(index)]
self.prevcall = lamb(*self.stack)
self.stack.clear()
self.stack.push(self.prevcall)
def store(self, value):
self.register = value
@staticmethod
def stringify(value):
try:
return chr(int(abs(value)))
except:
return str(value)
def wrap(self):
array = self.stack.copy()
self.stack.clear()
self.stack.push(array)
class Function:
def __init__(self, name, args, code, line, g_code, outerf, *flags):
self.name = name
self.args = args if args != -1 else 0
self.lamb = args == -1
self.code = code
self.stack = Stack()
self.flags = list(flags)
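        # Flag order is '*^?:!' as collected in Script.__init__ (inferred from this file):
        # [0] '*' return the whole stack, [1] '^' return the stack stringified and joined,
        # [2] '?' pass arguments through without padding/truncating to self.args,
        # [3] ':' print the return value, [4] '!' push the arguments as a single list.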
self.line = line
self.gen = g_code
self.outerf = outerf
def __call__(self, *args, funccall = False):
if not self.flags[2]:
args = list(args)[:self.args]
while len(args) != self.args:
args.append(-1)
if self.flags[4]:
self.stack.push(list(args))
else:
self.stack.push(*args)
script = StackScript(self.code, args, self.outerf, self.stack, self.line, self.gen)
value = script.run(*self.flags[:2])
self.stack = Stack()
if self.flags[3]:
print(value)
if funccall:
return value
else:
return Null(value)
return int(value) if type(value) == bool else value
def __repr__(self):
return '<Function ${}: {}>'.format(self.name, self.code)
class Script:
def process(self, lines):
final = ['']
for line in lines:
if line.startswith((' ', '\t')):
final[-1] += line.split(';')[0].strip()
else:
final.append(line.split(';')[0].strip())
return list(filter(None, map(lambda a: a.strip(','), final)))
def __init__(self,code, inputs, _, __):
self.NILADS = r'!~&#@NPOHSQVG'
self.MONADS = r'+-*/\^><%=R'
self.CONSTRUCTS = 'FWEIDL'
self.code = self.process(code.split('\n'))
self.called = False
self.implicit = False
self.stored = []
self.string = ''
self.functions = {}
self.y = 0
self.x = 0
self.line = 0
        self.I = 0
        self.inputs = inputs  # kept for run_chunk, which also needs to read the inputs
for cmd in self.code:
self.line += 1
if cmd[0] in self.CONSTRUCTS:
if cmd[:2] == 'EX':
loop = cmd.split(',')[1:]
for element in self.stored:
for chunk in loop:
self.run_chunk(chunk, x=element)
elif cmd[:2] == 'EY':
loop = cmd.split(',')[1:]
for element in self.stored:
for chunk in loop:
self.run_chunk(chunk, y=element)
elif cmd[:2] == 'W=':
loop = cmd.split(',')[1:]
while self.x == self.y:
for chunk in loop:
self.run_chunk(chunk)
elif cmd[:2] == 'W!':
loop = cmd.split(',')[1:]
while self.x != self.y:
for chunk in loop:
self.run_chunk(chunk)
elif cmd[0] == 'F':
loop = cmd.split(',')[1:]
for _ in range(self.x):
for chunk in loop:
self.run_chunk(chunk)
elif cmd[0] == 'I':
loop = cmd.split(',')[1:]
if self.x:
for chunk in loop:
self.run_chunk(chunk)
elif cmd[0] == 'W':
loop = cmd.split(',')[1:]
while self.x:
for chunk in loop:
self.run_chunk(chunk)
elif cmd[0] == 'D':
cmd = cmd.split(',')
func_name = cmd[1]
func_args = cmd[2].count('@')
func_flags = []
for flag in '*^?:!':
func_flags.append(flag in cmd[2])
func_code = ','.join(cmd[3:])
self.functions[func_name] = Function(func_name, func_args, func_code,
self.line, code, self.functions, *func_flags)
elif cmd[0] == 'L':
cmd = cmd.split(',')
flags = cmd[0][1:]
lambda_c = ','.join(cmd[1:])
lambda_n = len(list(filter(lambda a: bool(re.search(r'^lambda \d+$', a)), self.functions.keys()))) + 1
name = 'lambda {}'.format(lambda_n)
lambda_f = []
for flag in '*^?:!':
lambda_f.append(flag == '?' or flag in flags)
self.functions[name] = Function(name, -1, lambda_c, self.line,
code, self.functions, *lambda_f)
else:
self.implicit = True
if cmd[:2] in ['x:', 'y:']:
if cmd[0] == 'x': acc = self.x; acc_n = 'x'
else: acc = self.y; acc_n = 'y'
c = cmd[2:]
if c == '?':
try: acc = inputs[self.I]; self.I += 1
except: acc = 0
elif c == 'G':
try: acc = self.stored.pop()
except: raise error.EmptySecondStackError(self.line, self.code[self.line-1])
elif c == 'x': acc = self.x
elif c == 'y': acc = self.y
elif c == 'g': acc = self.stored[-1]
else: acc = eval_(c)
if acc_n == 'x': self.x = acc
if acc_n == 'y': self.y = acc
elif cmd[0] == '$':
self.called = True
cmd = cmd.split('>')
try: func = self.functions[cmd[0][1:]]
except: raise error.UnableToRetrieveFunctionError(self.line, self.code[self.line-1], cmd[0][1:])
args = []
for c in cmd[1:]:
if c == '?':
try: args.append(inputs[self.I]); self.I += 1
except: args.append(0)
elif c == 'G':
try: args.append(self.stored.pop())
except: raise error.EmptySecondStackError(self.line, self.code[self.line-1])
elif c == 'x': args.append(self.x)
elif c == 'y': args.append(self.y)
elif c == 'g': args.append(self.stored[-1])
elif c == '_': args += self.stored
else: args.append(eval_(c))
value = func(*args)
if type(value) == Null: value = value.value
if type(value) == str: self.stored.append(value)
if type(value) == list:
for v in value:
self.stored.append(v)
self.x = value
else:
self.run_chunk(cmd)
if not self.called and self.functions:
func = self.functions[list(self.functions.keys())[0]]
if self.I < len(inputs): result = func(*inputs[self.I:])
elif self.x:
if self.y: result = func(self.x, self.y)
else: result = func(self.x)
else: result = func()
if type(result) != Null and not self.implicit:
print(result)
def run_chunk(self, cmd, x=None, y=None):
if x is not None: self.x = x
if y is not None: self.y = y
symbol = cmd[0]
if symbol == "_":
            for i in self.inputs:
self.stored.append(i)
if symbol == '}': self.x, self.y = self.y, self.x
if len(cmd) > 1: value = eval_(cmd[1:])
else: value = None
if cmd[:2] in ['x:', 'y:']:
if cmd[0] == 'x': acc = self.x; acc_n = 'x'
else: acc = self.y; acc_n = 'y'
c = cmd[2:]
if c == '?':
                try: acc = self.inputs[self.I]; self.I += 1
except: acc = 0
elif c == 'G':
try: acc = self.stored.pop()
except: raise error.EmptySecondStackError(self.line, self.code[self.line-1])
elif c == 'x': acc = self.x
elif c == 'y': acc = self.y
elif c == 'g': acc = self.stored[-1]
else: acc = eval_(c)
if acc_n == 'x': self.x = acc
if acc_n == 'y': self.y = acc
elif symbol == '$':
self.called = True
cmd = cmd.split('>')
try: func = self.functions[cmd[0][1:]]
except: raise error.UnableToRetrieveFunctionError(self.line, self.code[self.line-1], cmd[0][1:])
args = []
for c in cmd[1:]:
if c == '?':
                    try: args.append(self.inputs[self.I]); self.I += 1
except: args.append(0)
elif c == 'G':
try: args.append(self.stored.pop())
except: raise error.EmptySecondStackError(self.line, self.code[self.line-1])
elif c == 'x': args.append(self.x)
elif c == 'y': args.append(self.y)
elif c == 'g': args.append(self.stored[-1])
elif c == '_': args += self.stored
else: args.append(eval_(c))
value = func(*args)
if type(value) == Null: value = value.value
if type(value) == str: self.stored.append(value)
if type(value) == list:
for v in value:
self.stored.append(v)
self.x = value
elif value is not None:
if value == '?':
try: value = inputs[self.I]; self.I += 1
except: raise error.NoMoreInputError(self.line, self.code[self.line-1])
if value == 'G':
try: value = self.stored.pop()
except: raise error.EmptySecondStackError(self.line, self.code[self.line-1])
if value == 'g': value = self.stored[-1]
if value == 'x': value = self.x
if value == 'y': value = self.y
try: self.x = self.COMMANDS[symbol](value)
except ZeroDivisionError: raise error.DivisionByZeroError(self.line, self.code[self.line-1])
except: raise error.InvalidSymbolError(self.line, self.code[self.line-1], symbol)
else:
try: v = self.COMMANDS[symbol]()
except:
if symbol == '?':
try: v = inputs[self.I]; self.I += 1
except: raise error.NoMoreInputError(self.line, self.code[self.line-1])
else: v = None
if v is None: return
self.x = v
def __call__(self, *values):
return None
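# Note: __call__ is an intentional no-op; the control-flow symbols 'F', 'I', 'W',
# 'E', 'D' and 'L' in COMMANDS below map to self, so they resolve to a callable
# placeholder here and are presumably dispatched elsewhere in the interpreter.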
@property
def COMMANDS(self):
return {'+':self.add,
'-':self.minus,
'*':self.times,
'/':self.divide,
'\\':self.int_divide,
'^':self.power,
'>':self.greater,
'<':self.less,
'!':self.factorial,
'%':self.modulo,
'~':self.negative,
'=':self.equal,
'&':self.next,
'#':self.double,
'@':self.half,
'|':self.notequals,
'N':self.not_,
'P':self.print,
'O':self.print_,
'H':self._print,
'R':self.randint,
'S':self.sqrt,
'V':self.store,
'G':self.get,
'F':self,
'I':self,
'W':self,
'E':self,
'D':self,
'L':self}
def add(self, y): return self.x + y
def minus(self, y): return self.x - y
def times(self, y): return self.x * y
def divide(self, y): return self.x / y
def int_divide(self, y): return int(self.x / y)
def power(self, y): return self.x ** y
def greater(self, y): return int(self.x > y)
def less(self, y): return int(self.x < y)
def factorial(self): return math.factorial(self.x)
def modulo(self, y): return self.x % y
def negative(self): return -self.x
def equal(self, y): return int(self.x == y)
def next(self): self.string += chr(self.x)
def double(self): return self.x * 2
def half(self): return self.x / 2
def notequals(self, y): return int(self.x != y)
def not_(self): return int(not self.x)
def print_(self): print(self.x)
def sqrt(self): return math.sqrt(self.x)
def store(self): self.stored.append(self.x)
def get(self): self.x = self.stored.pop()
def _print(self):
if self.string: print(end=self.string)
else: print(end=chr(self.x))
def print(self):
if self.string: print(self.string)
else: print(chr(self.x))
def randint(self,y=0):
if y > self.x: return random.randint(self.x, y)
return random.randint(y, self.x)
|
the-stack_106_30216 | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from ptsemseg.utils import initialize_weights
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
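# With bn_size set, build the bottleneck variant: a 1x1 conv expanding to
# bn_size * growth_rate channels followed by a 3x3 conv; otherwise use a single
# BN-ReLU-3x3 conv composite layer producing growth_rate feature maps.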
if bn_size is not None:
self.add_module('norm.1', nn.BatchNorm2d(num_input_features))
self.add_module('relu.1', nn.ReLU(inplace=True))
self.add_module('conv.1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1, bias=False))
self.add_module('norm.2', nn.BatchNorm2d(bn_size * growth_rate))
self.add_module('relu.2', nn.ReLU(inplace=True))
self.add_module('conv.2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False))
else:
self.add_module('norm.1', nn.BatchNorm2d(num_input_features))
self.add_module('relu.1', nn.ReLU(inplace=True))
self.add_module('conv.1', nn.Conv2d(num_input_features, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False))
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
class _TransitionDown(nn.Sequential):
def __init__(self, num_input_features, num_output_features, drop_rate):
super(_TransitionDown, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('dropout', nn.Dropout2d(p=drop_rate))
self.add_module('pool', nn.MaxPool2d(kernel_size=2, stride=2))
class _TransitionUp(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_TransitionUp, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('upconv', nn.ConvTranspose2d(num_input_features, num_output_features,
kernel_size=4, stride=2, bias=False))
class _DenseBlock(nn.Sequential):
def __init__(self, num_layers, num_input_features, growth_rate, drop_rate, bn_size=None):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
self.add_module('denselayer%d' % (i + 1), layer)
class tiramisu(nn.Module):
def __init__(self, n_classes, num_init_features, growth_rate, encoder_cf, bottleneck_cf, decoder_cf):
super(tiramisu, self).__init__()
encoder_cf_parts = encoder_cf.split('-')
decoder_cf_parts = decoder_cf.split('-')
encoder_cf = [int(x) for x in encoder_cf_parts]
decoder_cf = [int(x) for x in decoder_cf_parts]
compression = 1
bn_size = 1
drop_rate = 0.2
num_features = num_init_features
self.num_encoder_blocks = len(encoder_cf)
self.growth_rate = growth_rate
self.n_classes = n_classes
#First convolution (our backbone is same as a pure densenet)
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_features, kernel_size=3, stride=1, padding=1, bias=False)),
]))
#Encoder denseblocks
for i, num_layers in enumerate(encoder_cf):
block = _DenseBlock(num_layers=num_layers,
num_input_features=num_features,
growth_rate=growth_rate,
drop_rate=drop_rate,
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
# if i != len(encoder_cf) - 1:
block = _TransitionDown(num_input_features=num_features,
num_output_features=int(num_features * compression), drop_rate=drop_rate)
self.features.add_module('transition-down%d' % (i + 1), block)
num_features = int(num_features * compression)
#Bottleneck in the middle
block = _DenseBlock(num_layers=bottleneck_cf,
num_input_features=num_features,
growth_rate=growth_rate,
drop_rate=drop_rate)
self.features.add_module('bottleneck', block)
num_features = bottleneck_cf * growth_rate
#The first transposed convolution
block = _TransitionUp(num_input_features=num_features,
num_output_features=num_features)
self.features.add_module('transition-up1', block)
total = 0
sums = []
for v in encoder_cf:
total+=v
sums.append(total)
num_features_shortcuts = [num_init_features + x * growth_rate for x in sums]
num_features_shortcuts = num_features_shortcuts[::-1]
temp = [bottleneck_cf] + decoder_cf
temp = temp[:-1]
num_features_from_below = [x * growth_rate for x in temp]
# number of input feature maps fed to each dense block in the decoder
num_features_dec = [sum(x) for x in zip(num_features_shortcuts, num_features_from_below)]
num_features_from_below = num_features_from_below[1:]
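# Illustrative check (values not taken from this file): with the usual FC-DenseNet103
# settings -- num_init_features=48, growth_rate=16, encoder_cf=[4,5,7,10,12],
# bottleneck_cf=15, decoder_cf=[12,10,7,5,4] -- the reversed shortcut widths are
# [656, 464, 304, 192, 112], the features arriving from below are
# [240, 192, 160, 112, 80], so num_features_dec = [896, 656, 464, 304, 192].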
#Decoder denseblocks
for i, num_layers in enumerate(decoder_cf):
block = _DenseBlock(num_layers=num_layers,
num_input_features=num_features_dec[i],
growth_rate=growth_rate,
drop_rate=drop_rate)
self.features.add_module('denseblock-up%d' % (i + 1), block)
if i != len(decoder_cf) - 1:
block = _TransitionUp(num_input_features=num_features_from_below[i],
num_output_features=num_features_from_below[i])
self.features.add_module('transition-up%d' % (i + 2), block)
filters = num_features_from_below[-1] + num_features_shortcuts[-1] + decoder_cf[-1] * growth_rate
block = nn.Conv2d(filters, n_classes, 1)
self.features.add_module('predictionlayer', block)
initialize_weights(self.features)
def forward(self, x):
x = self.features[0](x) # Very first convolution
keep_shortcuts = []
x = self.features[1](x) # Denseblock-down1
keep_shortcuts.append(x)
x = self.features[2](x) # Transition-down1
x = self.features[3](x) # Denseblock-down2
keep_shortcuts.append(x)
x = self.features[4](x) # Transition-down2
x = self.features[5](x) # Denseblock-down3
keep_shortcuts.append(x)
x = self.features[6](x) # Transition-down3
x = self.features[7](x) # Denseblock-down4
keep_shortcuts.append(x)
x = self.features[8](x) # Transition-down4
x = self.features[9](x) # Denseblock-down5
keep_shortcuts.append(x)
x = self.features[10](x) # Transition-down5
keep_shortcuts = keep_shortcuts[::-1]
keep = []
for name, layer in self.features[11].named_children():
x = layer(x)
keep.append(x.narrow(1,0, self.growth_rate))
x = self.features[12](torch.cat(keep,1)) # Transition-up1
x = torch.cat((x[:,:,1:-1,1:-1], keep_shortcuts[0]),1)
del keep[:]
for name, layer in self.features[13].named_children():
x = layer(x)
keep.append(x.narrow(1,0, self.growth_rate))
x = self.features[14](torch.cat(keep,1)) # Transition-up2
x = torch.cat((x[:,:,1:-1,1:-1], keep_shortcuts[1]),1)
del keep[:]
for name, layer in self.features[15].named_children():
x = layer(x)
keep.append(x.narrow(1,0, self.growth_rate))
x = self.features[16](torch.cat(keep,1)) # Transition-up3
x = torch.cat((x[:,:,1:-1,1:-1], keep_shortcuts[2]),1)
del keep[:]
for name, layer in self.features[17].named_children():
x = layer(x)
keep.append(x.narrow(1,0, self.growth_rate))
x = self.features[18](torch.cat(keep,1)) # Transition-up4
x = torch.cat((x[:,:,1:-1,1:-1], keep_shortcuts[3]),1)
del keep[:]
for name, layer in self.features[19].named_children():
x = layer(x)
keep.append(x.narrow(1,0, self.growth_rate))
x = self.features[20](torch.cat(keep,1)) # Transition-up5
x = torch.cat((x[:,:,1:-1,1:-1], keep_shortcuts[4]),1)
x = self.features[21](x)
x = self.features[22](x) # Final layer 1x1 conv
#x = x.permute(0, 2, 3, 1).contiguous().view(-1, self.n_classes)
# out = nn.functional.log_softmax(x)
out = x
return out
|
the-stack_106_30219 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
from ducktape.mark import matrix, parametrize
from ducktape.cluster.remoteaccount import RemoteCommandError
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService, config_property
from kafkatest.services.connect import ConnectDistributedService, VerifiableSource, VerifiableSink, ConnectRestError, MockSink, MockSource
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH, LATEST_2_3, LATEST_2_2, LATEST_2_1, LATEST_2_0, LATEST_1_1, LATEST_1_0, LATEST_0_11_0, LATEST_0_10_2, LATEST_0_10_1, LATEST_0_10_0, LATEST_0_9, LATEST_0_8_2, KafkaVersion
from collections import Counter, namedtuple
import itertools
import json
import operator
import time
class ConnectDistributedTest(Test):
"""
Simple test of Kafka Connect in distributed mode, producing data from files on one cluster and consuming it on
another, validating the total output is identical to the input.
"""
FILE_SOURCE_CONNECTOR = 'org.apache.kafka.connect.file.FileStreamSourceConnector'
FILE_SINK_CONNECTOR = 'org.apache.kafka.connect.file.FileStreamSinkConnector'
INPUT_FILE = "/mnt/connect.input"
OUTPUT_FILE = "/mnt/connect.output"
TOPIC = "test"
OFFSETS_TOPIC = "connect-offsets"
OFFSETS_REPLICATION_FACTOR = "1"
OFFSETS_PARTITIONS = "1"
CONFIG_TOPIC = "connect-configs"
CONFIG_REPLICATION_FACTOR = "1"
STATUS_TOPIC = "connect-status"
STATUS_REPLICATION_FACTOR = "1"
STATUS_PARTITIONS = "1"
SCHEDULED_REBALANCE_MAX_DELAY_MS = "60000"
CONNECT_PROTOCOL="sessioned"
# Since tasks can be assigned to any node and we're testing with files, we need to make sure the content is the same
# across all nodes.
FIRST_INPUT_LIST = ["foo", "bar", "baz"]
FIRST_INPUTS = "\n".join(FIRST_INPUT_LIST) + "\n"
SECOND_INPUT_LIST = ["razz", "ma", "tazz"]
SECOND_INPUTS = "\n".join(SECOND_INPUT_LIST) + "\n"
SCHEMA = { "type": "string", "optional": False }
def __init__(self, test_context):
super(ConnectDistributedTest, self).__init__(test_context)
self.num_zk = 1
self.num_brokers = 1
self.topics = {
self.TOPIC: {'partitions': 1, 'replication-factor': 1}
}
self.zk = ZookeeperService(test_context, self.num_zk)
self.key_converter = "org.apache.kafka.connect.json.JsonConverter"
self.value_converter = "org.apache.kafka.connect.json.JsonConverter"
self.schemas = True
def setup_services(self, security_protocol=SecurityConfig.PLAINTEXT, timestamp_type=None, broker_version=DEV_BRANCH, auto_create_topics=False):
self.kafka = KafkaService(self.test_context, self.num_brokers, self.zk,
security_protocol=security_protocol, interbroker_security_protocol=security_protocol,
topics=self.topics, version=broker_version,
server_prop_overides=[["auto.create.topics.enable", str(auto_create_topics)]])
if timestamp_type is not None:
for node in self.kafka.nodes:
node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
self.cc = ConnectDistributedService(self.test_context, 3, self.kafka, [self.INPUT_FILE, self.OUTPUT_FILE])
self.cc.log_level = "DEBUG"
self.zk.start()
self.kafka.start()
def _start_connector(self, config_file):
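# Render the properties template and turn its "key=value" lines (skipping blanks
# and comment lines) into the dict payload expected by the Connect REST API.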
connector_props = self.render(config_file)
connector_config = dict([line.strip().split('=', 1) for line in connector_props.split('\n') if line.strip() and not line.strip().startswith('#')])
self.cc.create_connector(connector_config)
def _connector_status(self, connector, node=None):
try:
return self.cc.get_connector_status(connector, node)
except ConnectRestError:
return None
def _connector_has_state(self, status, state):
return status is not None and status['connector']['state'] == state
def _task_has_state(self, task_id, status, state):
if not status:
return False
tasks = status['tasks']
if not tasks:
return False
for task in tasks:
if task['id'] == task_id:
return task['state'] == state
return False
def _all_tasks_have_state(self, status, task_count, state):
if status is None:
return False
tasks = status['tasks']
if len(tasks) != task_count:
return False
return all(task['state'] == state for task in tasks)
def is_running(self, connector, node=None):
status = self._connector_status(connector.name, node)
return self._connector_has_state(status, 'RUNNING') and self._all_tasks_have_state(status, connector.tasks, 'RUNNING')
def is_paused(self, connector, node=None):
status = self._connector_status(connector.name, node)
return self._connector_has_state(status, 'PAUSED') and self._all_tasks_have_state(status, connector.tasks, 'PAUSED')
def connector_is_running(self, connector, node=None):
status = self._connector_status(connector.name, node)
return self._connector_has_state(status, 'RUNNING')
def connector_is_failed(self, connector, node=None):
status = self._connector_status(connector.name, node)
return self._connector_has_state(status, 'FAILED')
def task_is_failed(self, connector, task_id, node=None):
status = self._connector_status(connector.name, node)
return self._task_has_state(task_id, status, 'FAILED')
def task_is_running(self, connector, task_id, node=None):
status = self._connector_status(connector.name, node)
return self._task_has_state(task_id, status, 'RUNNING')
@cluster(num_nodes=5)
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
def test_restart_failed_connector(self, connect_protocol):
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services()
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
self.sink = MockSink(self.cc, self.topics.keys(), mode='connector-failure', delay_sec=5)
self.sink.start()
wait_until(lambda: self.connector_is_failed(self.sink), timeout_sec=15,
err_msg="Failed to see connector transition to the FAILED state")
self.cc.restart_connector(self.sink.name)
wait_until(lambda: self.connector_is_running(self.sink), timeout_sec=10,
err_msg="Failed to see connector transition to the RUNNING state")
@cluster(num_nodes=5)
@matrix(connector_type=['source', 'sink'], connect_protocol=['sessioned', 'compatible', 'eager'])
def test_restart_failed_task(self, connector_type, connect_protocol):
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services()
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
connector = None
if connector_type == "sink":
connector = MockSink(self.cc, self.topics.keys(), mode='task-failure', delay_sec=5)
else:
connector = MockSource(self.cc, mode='task-failure', delay_sec=5)
connector.start()
task_id = 0
wait_until(lambda: self.task_is_failed(connector, task_id), timeout_sec=20,
err_msg="Failed to see task transition to the FAILED state")
self.cc.restart_task(connector.name, task_id)
wait_until(lambda: self.task_is_running(connector, task_id), timeout_sec=10,
err_msg="Failed to see task transition to the RUNNING state")
@cluster(num_nodes=5)
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
def test_pause_and_resume_source(self, connect_protocol):
"""
Verify that source connectors stop producing records when paused and begin again after
being resumed.
"""
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services()
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
self.source = VerifiableSource(self.cc, topic=self.TOPIC)
self.source.start()
wait_until(lambda: self.is_running(self.source), timeout_sec=30,
err_msg="Failed to see connector transition to the RUNNING state")
self.cc.pause_connector(self.source.name)
# wait until all nodes report the paused transition
for node in self.cc.nodes:
wait_until(lambda: self.is_paused(self.source, node), timeout_sec=30,
err_msg="Failed to see connector transition to the PAUSED state")
# verify that we do not produce new messages while paused
num_messages = len(self.source.sent_messages())
time.sleep(10)
assert num_messages == len(self.source.sent_messages()), "Paused source connector should not produce any messages"
self.cc.resume_connector(self.source.name)
for node in self.cc.nodes:
wait_until(lambda: self.is_running(self.source, node), timeout_sec=30,
err_msg="Failed to see connector transition to the RUNNING state")
# after resuming, we should see records produced again
wait_until(lambda: len(self.source.sent_messages()) > num_messages, timeout_sec=30,
err_msg="Failed to produce messages after resuming source connector")
@cluster(num_nodes=5)
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
def test_pause_and_resume_sink(self, connect_protocol):
"""
Verify that sink connectors stop consuming records when paused and begin again after
being resumed.
"""
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services()
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
# use the verifiable source to produce a steady stream of messages
self.source = VerifiableSource(self.cc, topic=self.TOPIC)
self.source.start()
wait_until(lambda: len(self.source.committed_messages()) > 0, timeout_sec=30,
err_msg="Timeout expired waiting for source task to produce a message")
self.sink = VerifiableSink(self.cc, topics=[self.TOPIC])
self.sink.start()
wait_until(lambda: self.is_running(self.sink), timeout_sec=30,
err_msg="Failed to see connector transition to the RUNNING state")
self.cc.pause_connector(self.sink.name)
# wait until all nodes report the paused transition
for node in self.cc.nodes:
wait_until(lambda: self.is_paused(self.sink, node), timeout_sec=30,
err_msg="Failed to see connector transition to the PAUSED state")
# verify that we do not consume new messages while paused
num_messages = len(self.sink.received_messages())
time.sleep(10)
assert num_messages == len(self.sink.received_messages()), "Paused sink connector should not consume any messages"
self.cc.resume_connector(self.sink.name)
for node in self.cc.nodes:
wait_until(lambda: self.is_running(self.sink, node), timeout_sec=30,
err_msg="Failed to see connector transition to the RUNNING state")
# after resuming, we should see records consumed again
wait_until(lambda: len(self.sink.received_messages()) > num_messages, timeout_sec=30,
err_msg="Failed to consume messages after resuming sink connector")
@cluster(num_nodes=5)
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
def test_pause_state_persistent(self, connect_protocol):
"""
Verify that paused state is preserved after a cluster restart.
"""
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services()
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
self.source = VerifiableSource(self.cc, topic=self.TOPIC)
self.source.start()
wait_until(lambda: self.is_running(self.source), timeout_sec=30,
err_msg="Failed to see connector transition to the RUNNING state")
self.cc.pause_connector(self.source.name)
self.cc.restart()
if connect_protocol == 'compatible':
timeout_sec = 120
else:
timeout_sec = 30
# we should still be paused after restarting
for node in self.cc.nodes:
wait_until(lambda: self.is_paused(self.source, node), timeout_sec=timeout_sec,
err_msg="Failed to see connector startup in PAUSED state")
@cluster(num_nodes=6)
@matrix(security_protocol=[SecurityConfig.PLAINTEXT, SecurityConfig.SASL_SSL], connect_protocol=['sessioned', 'compatible', 'eager'])
def test_file_source_and_sink(self, security_protocol, connect_protocol):
"""
Tests that a basic file connector works across clean rolling bounces. This validates that the connector is
correctly created, tasks instantiated, and as nodes restart the work is rebalanced across nodes.
"""
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services(security_protocol=security_protocol)
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
self.logger.info("Creating connectors")
self._start_connector("connect-file-source.properties")
self._start_connector("connect-file-sink.properties")
# Generating data on the source node should generate new records and create new output on the sink node. Timeouts
# here need to be more generous than they are for standalone mode because a) it takes longer to write configs,
# do rebalancing of the group, etc, and b) without explicit leave group support, rebalancing takes a while
for node in self.cc.nodes:
node.account.ssh("echo -e -n " + repr(self.FIRST_INPUTS) + " >> " + self.INPUT_FILE)
wait_until(lambda: self._validate_file_output(self.FIRST_INPUT_LIST), timeout_sec=70, err_msg="Data added to input file was not seen in the output file in a reasonable amount of time.")
# Restarting both should result in them picking up where they left off,
# only processing new data.
self.cc.restart()
if connect_protocol == 'compatible':
timeout_sec = 150
else:
timeout_sec = 70
for node in self.cc.nodes:
node.account.ssh("echo -e -n " + repr(self.SECOND_INPUTS) + " >> " + self.INPUT_FILE)
wait_until(lambda: self._validate_file_output(self.FIRST_INPUT_LIST + self.SECOND_INPUT_LIST), timeout_sec=timeout_sec, err_msg="Sink output file never converged to the same state as the input file")
@cluster(num_nodes=6)
@matrix(clean=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
def test_bounce(self, clean, connect_protocol):
"""
Validates that source and sink tasks that run continuously and produce a predictable sequence of messages
run correctly and deliver messages exactly once when Kafka Connect workers undergo clean rolling bounces.
"""
num_tasks = 3
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services()
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
self.source = VerifiableSource(self.cc, topic=self.TOPIC, tasks=num_tasks, throughput=100)
self.source.start()
self.sink = VerifiableSink(self.cc, tasks=num_tasks, topics=[self.TOPIC])
self.sink.start()
for _ in range(3):
for node in self.cc.nodes:
started = time.time()
self.logger.info("%s bouncing Kafka Connect on %s", clean and "Clean" or "Hard", str(node.account))
self.cc.stop_node(node, clean_shutdown=clean)
with node.account.monitor_log(self.cc.LOG_FILE) as monitor:
self.cc.start_node(node)
monitor.wait_until("Starting connectors and tasks using config offset", timeout_sec=90,
err_msg="Kafka Connect worker didn't successfully join group and start work")
self.logger.info("Bounced Kafka Connect on %s and rejoined in %f seconds", node.account, time.time() - started)
# Give additional time for the consumer groups to recover. Even if it is not a hard bounce, there are
# some cases where a restart can cause a rebalance to take the full length of the session timeout
# (e.g. if the client shuts down before it has received the memberId from its initial JoinGroup).
# If we don't give enough time for the group to stabilize, the next bounce may cause consumers to
# be shut down before they have any time to process data and we can end up with zero data making it
# through the test.
time.sleep(15)
self.source.stop()
self.sink.stop()
self.cc.stop()
# Validate at least once delivery of everything that was reported as written since we should have flushed and
# cleanly exited. Currently this only tests at least once delivery because the sink task may not have consumed
# all the messages generated by the source task. This needs to be done per-task since seqnos are not unique across
# tasks.
success = True
errors = []
allow_dups = not clean
src_messages = self.source.committed_messages()
sink_messages = self.sink.flushed_messages()
for task in range(num_tasks):
# Validate source messages
src_seqnos = [msg['seqno'] for msg in src_messages if msg['task'] == task]
# Every seqno up to the largest one we ever saw should appear. Each seqno should only appear once because clean
# bouncing should commit on rebalance.
src_seqno_max = max(src_seqnos)
self.logger.debug("Max source seqno: %d", src_seqno_max)
src_seqno_counts = Counter(src_seqnos)
missing_src_seqnos = sorted(set(range(src_seqno_max)).difference(set(src_seqnos)))
duplicate_src_seqnos = sorted([seqno for seqno,count in src_seqno_counts.items() if count > 1])
if missing_src_seqnos:
self.logger.error("Missing source sequence numbers for task " + str(task))
errors.append("Found missing source sequence numbers for task %d: %s" % (task, missing_src_seqnos))
success = False
if not allow_dups and duplicate_src_seqnos:
self.logger.error("Duplicate source sequence numbers for task " + str(task))
errors.append("Found duplicate source sequence numbers for task %d: %s" % (task, duplicate_src_seqnos))
success = False
# Validate sink messages
sink_seqnos = [msg['seqno'] for msg in sink_messages if msg['task'] == task]
# Every seqno up to the largest one we ever saw should appear. Each seqno should only appear once because
# clean bouncing should commit on rebalance.
sink_seqno_max = max(sink_seqnos)
self.logger.debug("Max sink seqno: %d", sink_seqno_max)
sink_seqno_counts = Counter(sink_seqnos)
missing_sink_seqnos = sorted(set(range(sink_seqno_max)).difference(set(sink_seqnos)))
duplicate_sink_seqnos = sorted([seqno for seqno,count in sink_seqno_counts.items() if count > 1])
if missing_sink_seqnos:
self.logger.error("Missing sink sequence numbers for task " + str(task))
errors.append("Found missing sink sequence numbers for task %d: %s" % (task, missing_sink_seqnos))
success = False
if not allow_dups and duplicate_sink_seqnos:
self.logger.error("Duplicate sink sequence numbers for task " + str(task))
errors.append("Found duplicate sink sequence numbers for task %d: %s" % (task, duplicate_sink_seqnos))
success = False
# Validate source and sink match
if sink_seqno_max > src_seqno_max:
self.logger.error("Found sink sequence number greater than any generated sink sequence number for task %d: %d > %d", task, sink_seqno_max, src_seqno_max)
errors.append("Found sink sequence number greater than any generated sink sequence number for task %d: %d > %d" % (task, sink_seqno_max, src_seqno_max))
success = False
if src_seqno_max < 1000 or sink_seqno_max < 1000:
errors.append("Not enough messages were processed: source:%d sink:%d" % (src_seqno_max, sink_seqno_max))
success = False
if not success:
self.mark_for_collect(self.cc)
# Also collect the data in the topic to aid in debugging
consumer_validator = ConsoleConsumer(self.test_context, 1, self.kafka, self.source.topic, consumer_timeout_ms=1000, print_key=True)
consumer_validator.run()
self.mark_for_collect(consumer_validator, "consumer_stdout")
assert success, "Found validation errors:\n" + "\n ".join(errors)
@cluster(num_nodes=6)
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
def test_transformations(self, connect_protocol):
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services(timestamp_type='CreateTime')
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
ts_fieldname = 'the_timestamp'
NamedConnector = namedtuple('Connector', ['name'])
source_connector = NamedConnector(name='file-src')
self.cc.create_connector({
'name': source_connector.name,
'connector.class': 'org.apache.kafka.connect.file.FileStreamSourceConnector',
'tasks.max': 1,
'file': self.INPUT_FILE,
'topic': self.TOPIC,
'transforms': 'hoistToStruct,insertTimestampField',
'transforms.hoistToStruct.type': 'org.apache.kafka.connect.transforms.HoistField$Value',
'transforms.hoistToStruct.field': 'content',
'transforms.insertTimestampField.type': 'org.apache.kafka.connect.transforms.InsertField$Value',
'transforms.insertTimestampField.timestamp.field': ts_fieldname,
})
wait_until(lambda: self.connector_is_running(source_connector), timeout_sec=30, err_msg='Failed to see connector transition to the RUNNING state')
for node in self.cc.nodes:
node.account.ssh("echo -e -n " + repr(self.FIRST_INPUTS) + " >> " + self.INPUT_FILE)
consumer = ConsoleConsumer(self.test_context, 1, self.kafka, self.TOPIC, consumer_timeout_ms=15000, print_timestamp=True)
consumer.run()
assert len(consumer.messages_consumed[1]) == len(self.FIRST_INPUT_LIST)
expected_schema = {
'type': 'struct',
'fields': [
{'field': 'content', 'type': 'string', 'optional': False},
{'field': ts_fieldname, 'name': 'org.apache.kafka.connect.data.Timestamp', 'type': 'int64', 'version': 1, 'optional': True},
],
'optional': False
}
for msg in consumer.messages_consumed[1]:
(ts_info, value) = msg.split('\t')
assert ts_info.startswith('CreateTime:')
ts = int(ts_info[len('CreateTime:'):])
obj = json.loads(value)
assert obj['schema'] == expected_schema
assert obj['payload']['content'] in self.FIRST_INPUT_LIST
assert obj['payload'][ts_fieldname] == ts
@cluster(num_nodes=5)
@parametrize(broker_version=str(DEV_BRANCH), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
@parametrize(broker_version=str(LATEST_0_11_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
@parametrize(broker_version=str(LATEST_0_10_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
@parametrize(broker_version=str(LATEST_0_10_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
@parametrize(broker_version=str(LATEST_0_10_0), auto_create_topics=True, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
@parametrize(broker_version=str(DEV_BRANCH), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_2_3), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_2_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_2_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_2_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_1_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_1_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_0_11_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_0_10_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_0_10_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(LATEST_0_10_0), auto_create_topics=True, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
@parametrize(broker_version=str(DEV_BRANCH), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_2_3), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_2_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_2_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_2_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_1_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_1_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_0_11_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_0_10_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_0_10_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
@parametrize(broker_version=str(LATEST_0_10_0), auto_create_topics=True, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='eager')
def test_broker_compatibility(self, broker_version, auto_create_topics, security_protocol, connect_protocol):
"""
Verify that Connect will start up with various broker versions with various configurations.
When Connect distributed starts up, it either creates internal topics (v0.10.1.0 and after)
or relies upon the broker to auto-create the topics (v0.10.0.x and before).
"""
self.CONNECT_PROTOCOL = connect_protocol
self.setup_services(broker_version=KafkaVersion(broker_version), auto_create_topics=auto_create_topics, security_protocol=security_protocol)
self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
self.cc.start()
self.logger.info("Creating connectors")
self._start_connector("connect-file-source.properties")
self._start_connector("connect-file-sink.properties")
# Generating data on the source node should generate new records and create new output on the sink node. Timeouts
# here need to be more generous than they are for standalone mode because a) it takes longer to write configs,
# do rebalancing of the group, etc, and b) without explicit leave group support, rebalancing takes a while
for node in self.cc.nodes:
node.account.ssh("echo -e -n " + repr(self.FIRST_INPUTS) + " >> " + self.INPUT_FILE)
wait_until(lambda: self._validate_file_output(self.FIRST_INPUT_LIST), timeout_sec=70, err_msg="Data added to input file was not seen in the output file in a reasonable amount of time.")
def _validate_file_output(self, input):
input_set = set(input)
# Output needs to be collected from all nodes because we can't be sure where the tasks will be scheduled.
# Between the first and second rounds, we might even end up with half the data on each node.
output_set = set(itertools.chain(*[
[line.strip() for line in self._file_contents(node, self.OUTPUT_FILE)] for node in self.cc.nodes
]))
return input_set == output_set
def _file_contents(self, node, file):
try:
# Convert to a list here or the RemoteCommandError may be returned during a call to the generator instead of
# immediately
return list(node.account.ssh_capture("cat " + file))
except RemoteCommandError:
return []
|
the-stack_106_30221 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import random
import unittest
from amaascore.assets.custom_asset import CustomAsset
from amaascore.assets.interface import AssetsInterface
from tests.unit.config import STAGE
class Pizza(CustomAsset):
def __init__(self, size, asset_id, asset_manager_id, toppings=None):
self.size = size
self.toppings = toppings
client_additional = json.dumps({'size': self.size, 'toppings': self.toppings})
super(Pizza, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id,
client_additional=client_additional, fungible=False)
class CustomAssetTest(unittest.TestCase):
def setUp(self):
self.longMessage = True # Print complete error message on failure
asset_manager_id = random.randint(1, 2**31-1)
self.pizza = Pizza(asset_id='pizza1', asset_manager_id=asset_manager_id,
size='Large', toppings=['pineapple', 'corn', 'garlic'])
self.assets_interface = AssetsInterface(environment=STAGE)
def tearDown(self):
pass
def test_CustomAsset(self):
pizza = self.assets_interface.new(self.pizza)
self.assertEqual(type(pizza), CustomAsset)
self.assertIsNotNone(pizza.client_additional)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_30222 | from MDSplus import *
my_tree=Tree('test',-1,'edit')
#Set up nodes
try :
my_tree.addNode('scratch','structure')
my_node=my_tree.getNode('scratch')
my_node.addNode('my_name','text')
my_node.addNode('my_age','numeric')
my_node.addNode('age_months','numeric')
my_node.addNode('timebase','axis')
my_node.addNode('my_cosine','signal')
except :
print("Couldn't add scratch example nodes")
try :
my_tree.addNode('demo_time','structure')
my_node=my_tree.getNode('demo_time')
my_node.addNode('tsync_sig','signal')
my_node.getNode('tsync_sig').addNode('comment','text')
my_node.getNode('tsync_sig.comment').record='Pulse-width-modulated signal encoding the global time. The rising edges of pulses always occur at times that are integer multiples of 0.01 s on the global lab clock. The duration of time that each pulse is on encodes the global time by the scale factor, 0.001 seconds of pulse on time = 1 s on the global clock. A pulse duration of 0.00075 s then means that that lab time was 0.75 s at the rising edge of this pulse.'
my_node.addNode('t_trig_ideal','numeric')
my_node.getNode('t_trig_ideal').addNode('comment','text')
my_node.getNode('t_trig_ideal.comment').record='Ideal trigger time [s] of the signal, which is likely in error from what the actual hardware achieved.'
my_node.addNode('t_samp_ideal','numeric')
my_node.getNode('t_samp_ideal').addNode('comment','text')
my_node.getNode('t_samp_ideal.comment').record='Ideal sampling time [s] of the signal, which is likely in error from what the actual hardware achieved.'
my_node.addNode('t_trig_corr','numeric')
my_node.getNode('t_trig_corr').addNode('comment','text')
my_node.getNode('t_trig_corr.comment').record='Corrected trigger time based on analysis of tsync_sig [s].'
my_node.addNode('t_samp_corr','numeric')
my_node.getNode('t_samp_corr').addNode('comment','text')
my_node.getNode('t_samp_corr.comment').record='Corrected sampling time based on analysis of tsync_sig [s].'
except :
print("Couldn't add demo_time nodes")
try :
my_node=my_tree.getNode('demo_time')
my_node.addNode('v_thresh','numeric')
my_node.getNode('v_thresh').addNode('comment','text')
my_node.getNode('v_thresh.comment').record='Threshold voltage delineating the boundary between the ON and OFF logic levels for tsync_sig [V].'
my_node.getNode('v_thresh').record=2.5
except :
print("Couldn't add threshold voltage node")
try :
my_node=my_tree.getNode('demo_time.tsync_sig')
my_node.addNode('raw_sig','numeric')
my_node.addNode('ideal_time','axis')
except :
print("Couldn't add numeric nodes for tsync_sig")
my_tree.write()
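# Illustrative sketch, not part of the original tree setup: by the encoding described
# in the tsync_sig comment node above, each pulse's ON duration maps to the global lab
# time at its rising edge via a factor of 1000 (0.001 s of ON time == 1 s of lab time).
# The helper below assumes a hypothetical list of measured ON durations in seconds.
def decode_global_time(pulse_on_durations):
return [duration / 0.001 for duration in pulse_on_durations]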
|
the-stack_106_30223 | import pygame, sys, random, os
from DinoRun import *
from pygame.locals import *
import time
sys.path.insert(0, '../../')
WINDOWWIDTH = 1280
WINDOWHEIGHT = 720
# Menu background images
BG_MENU_IMG = pygame.image.load('img/backgroundMenu.png')
BG_PLAY_IMG = pygame.image.load("img/BackGroundPlay.png")
BG_MENU_SetAV = pygame.image.load("img/GiaoDienChonSetNV.png")
BG_Betting = pygame.image.load("img/GiaoDienBietting.png")
#BG_HELP_IMG = pygame.image.load('img/')
#BG_SHOP_IMG = pygame.image.load('img/')
b = []  # car images shown on the result screen
a = []  # finishing order of the cars
# Button hover images
NutPlay = pygame.image.load("img/NutPlay1.png")
NutHelp = pygame.image.load("img/NutHelp.png")
NutMiniGame = pygame.image.load("img/mini.png")
NutShop = pygame.image.load("img/shop.png")
# Sounds
menu_sound = pygame.mixer.Sound("sound/Road Tripzzz - Ofshane (2).mp3")
menu_sound.set_volume(0.25)
minigame_sound = pygame.mixer.Sound("sound/Cuckoo Clock - Quincas Moreira.mp3")
minigame_sound.set_volume(0.25)
pygame.init()
FPS = 60
fpsClock = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.flip()
pygame.display.set_caption('RACING BETTING')
#======================================================================================
RED=(255,0,0)
GREEN=(0,255,0)
WINDOWWIDTH = 1500
WINDOWHEIGHT = 700
X_MARGIN = 80
LANEWIDTH = 60
CARWIDTH = 0
CARHEIGHT = 0
CARSPEED = 3
CARIMG10=pygame.image.load("img/dollar.png")
class Car1():
def __init__(self):
self.width = CARWIDTH
self.height = CARHEIGHT
self.x = 0
self.y = 312
self.speed = CARSPEED
self.surface = pygame.Surface((self.width, self.height))
self.surface.fill((255, 255, 255))
def draw(self):
DISPLAYSURF.blit(car_1, (int(self.x), int(self.y)))
global pos_now1
pos_now1 = self.x
if (pos_now1 > 1181):
a.append(1)
def update(self):
global pos_now1
pos_now1 = self.x
if self.x <= 1180:
if self.x +6>1180:
self.x += 8
global vt_1
vt_1 = 0
change = random.randint(1, 5)
if ((self.x > 100 * change) and (self.x < 100 * change + 5)):
self.x = 400
elif ((self.x > 105 * change) and (self.x < 105 * change + 5)):
pass
elif ((self.x > 125 * change) and (self.x < 125 * change + 100)):
self.x -= random.randint(0,3)
else:
self.x += random.randint(0,3)
else:
vt_1= 1
self.x = 1181
class Car2():
def __init__(self):
self.width = CARWIDTH
self.height = CARHEIGHT
self.x = 0
self.y = 388
self.speed = CARSPEED
self.surface = pygame.Surface((self.width, self.height))
self.surface.fill((255, 255, 255))
def draw(self):
DISPLAYSURF.blit(car_2, (int(self.x), int(self.y)))
global pos_now2
pos_now2 = self.x
if (pos_now2 > 1181):
a.append(2)
def update(self):
global pos_now2
pos_now2 = self.x
if self.x <= 1180:
if self.x +6>1180:
self.x += 8
global vt_2
vt_2 = 0
change = random.randint(1, 5)
if ((self.x > 100 * change) and (self.x < 100 * change + 5)):
self.x = 400
elif ((self.x > 105 * change) and (self.x < 105 * change + 5)):
pass
elif ((self.x > 125 * change) and (self.x < 125 * change + 100)):
self.x -= random.randint(0, 3)
else:
self.x += random.randint(0, 3)
else:
vt_2 = 2
self.x = 1181
vt2_x = self.x
class Car3():
def __init__(self):
self.width = CARWIDTH
self.height = CARHEIGHT
self.x = 0
self.y = 479
self.speed = CARSPEED
self.surface = pygame.Surface((self.width, self.height))
self.surface.fill((255, 255, 255))
def draw(self):
DISPLAYSURF.blit(car_3, (int(self.x), int(self.y)))
global pos_now3
pos_now3 = self.x
if (pos_now3 > 1181):
a.append(3)
def update(self):
global pos_now3
pos_now3 = self.x
if self.x <= 1180:
if self.x +6>1180:
self.x += 8
global vt_3
vt_3 = 0
change = random.randint(1, 5)
if ((self.x > 100 * change) and (self.x < 100 * change + 5)):
self.x = 400
elif ((self.x > 105 * change) and (self.x < 105 * change + 5)):
pass
elif ((self.x > 125 * change) and (self.x < 125 * change + 100)):
self.x -= random.randint(0, 3)
else:
self.x += random.randint(0, 3)
else:
vt_3 = 3
self.x = 1181
class Car4():
def __init__(self):
self.width = CARWIDTH
self.height = CARHEIGHT
self.x = 0
self.y = 564
self.speed = CARSPEED
self.surface = pygame.Surface((self.width, self.height))
self.surface.fill((255, 255, 255))
def draw(self):
DISPLAYSURF.blit(car_4, (int(self.x), int(self.y)))
global pos_now4
pos_now4 = self.x
if (pos_now4 > 1181):
a.append(4)
def update(self):
global pos_now4
pos_now4 = self.x
if self.x <= 1180:
if self.x +6>1180:
self.x += 8
global vt_4
vt_4 = 0
change = random.randint(1, 5)
if ((self.x > 100 * change) and (self.x < 100 * change + 5)):
self.x = 400
elif ((self.x > 105 * change) and (self.x < 105 * change + 5)):
pass
elif ((self.x > 125 * change) and (self.x < 125 * change + 100)):
self.x -= random.randint(0, 3)
else:
self.x += random.randint(0, 3)
else:
vt_4 = 4
self.x = 1181
class Car5():
def __init__(self):
self.width = CARWIDTH
self.height = CARHEIGHT
self.x = 0
self.y = 646
self.speed = CARSPEED
self.surface = pygame.Surface((self.width, self.height))
self.surface.fill((255, 255, 255))
def draw(self):
DISPLAYSURF.blit(car_5, (int(self.x), int(self.y)))
global pos_now5
pos_now5=self.x
if (pos_now5>1181):
a.append(5)
def update(self):
if self.x <= 1180:
if self.x +6>1180:
self.x += 8
global vt_5
vt_5 = 0
change = random.randint(1, 5)
if self.x==1280:
self.x+=2
if ((self.x > 100 * change) and (self.x < 100 * change + 5)):
self.x = 400
elif ((self.x > 105 * change) and (self.x < 105 * change + 5)):
pass
elif ((self.x > 125 * change) and (self.x < 125 * change + 100)):
self.x -= random.randint(0, 3)
else:
self.x += random.randint(0, 3)
else:
vt_5 = 5
self.x = 1181
def gamePlay(bg, car1, car2, car3, car4, car5):
tmp = 10
global coin, tienCuoc
car1.__init__()
car2.__init__()
car3.__init__()
car4.__init__()
car5.__init__()
bg.__init__()
bg.count_321()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
bg.draw()
car1.draw()
car1.update()
car2.draw()
car2.update()
car3.draw()
car3.update()
car4.draw()
car4.update()
car5.draw()
car5.update()
print(chon_xe)
print(a)
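# Settlement: once all five cars have finished, a win (the bet car chon_xe[0] equals
# the first finisher a[0]) credits 10x the stake, a loss deducts the stake; the tmp
# guard ensures the balance is only adjusted once even though this block runs every frame.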
if (vt_1==1 and vt_2==2 and vt_3== 3 and vt_4==4 and vt_5==5):
if (chon_xe[0]== a[0]):
over_bg = pygame.image.load("img/giaodienWWin.png")
DISPLAYSURF.blit(over_bg, (0, 0))
if ( tmp == 10):
coin[0] += int(tienCuoc[0]) * 10
tmp += 10
else:
over_bg = pygame.image.load("img/giaodienOver.png")
DISPLAYSURF.blit(over_bg, (0, 0))
if (tmp == 10 ):
coin[0] -= int(tienCuoc[0])
tmp += 10
file_2 = open(coin_username_info, 'w')
file_2.write(str(coin[0]))
file_2.close()
for i in range(5):
if i == 0:
DISPLAYSURF.blit(b[a[i] - 1], (551, 245))
if i == 1:
DISPLAYSURF.blit(b[a[i] - 1], (406, 340))
if i == 2:
DISPLAYSURF.blit(b[a[i] - 1], (690, 377))
if i == 3:
DISPLAYSURF.blit(b[a[i] - 1], (274, 426))
if i == 4:
DISPLAYSURF.blit(b[a[i] - 1], (836, 460))
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
a.clear()
b.clear()
menu_sound.stop()
MeNu()
pygame.display.update()
fpsClock.tick(FPS)
def start_the_game():
bg = Back_ground()
car1 = Car1()
car2 = Car2()
car3 = Car3()
car4 = Car4()
car5 = Car5()
gamePlay(bg, car1, car2, car3, car4, car5)
#######################################################
def drawCoin():  # draw the player's money
draw_text(str(coin[0]) + "$", "font/monofonto.ttf", 38, (255, 255, 255), SCREEN_WIDTH - 70, 170, "topright")
def draw_Race(race_img):  # draw the race track
DISPLAYSURF.blit(race_img, (0, 0))
# UI screens
def HamGiaoDienSetNV():
while True:
mouse_x, mouse_y = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# Go back
if event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
return
if event.type == pygame.MOUSEBUTTONDOWN:
# Go back
if (event.button == 1) & (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
return
# tick button for the first character set
if (event.button == 1) & (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 270) & (mouse_y <= 328):
HamGiaoDienBetting(1)
if (event.button == 1) & (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 340) & (mouse_y <= 398):
HamGiaoDienBetting(2)
if (event.button == 1) & (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 417) & (mouse_y <= 478):
HamGiaoDienBetting(3)
if (event.button == 1) & (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 487) & (mouse_y <= 549):
HamGiaoDienBetting(4)
if (event.button == 1) & (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 566) & (mouse_y <= 624):
HamGiaoDienBetting(5)
DISPLAYSURF.blit(BG_MENU_SetAV, (0, 0))
# Button hover effects
if (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
DISPLAYSURF.blit(pygame.image.load("img/NutBack.png"), (0, 0))
if (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 270) & (mouse_y <= 328):
DISPLAYSURF.blit(pygame.image.load("img/NutTickSet1.png"), (0, 0))
if (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 340) & (mouse_y <= 398):
DISPLAYSURF.blit(pygame.image.load("img/NutTickSet2.png"), (0, 0))
if (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 417) & (mouse_y <= 478):
DISPLAYSURF.blit(pygame.image.load("img/NutTickSet3.png"), (0, 0))
if (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 487) & (mouse_y <= 549):
DISPLAYSURF.blit(pygame.image.load("img/NutTickSet4.png"), (0, 0))
if (mouse_x >= 1032) & (mouse_x <= 1090) & (mouse_y >= 566) & (mouse_y <= 624):
DISPLAYSURF.blit(pygame.image.load("img/NutTickSet5.png"), (0, 0))
pygame.display.update()
FONT = pygame.font.Font("font/monofonto.ttf", 32)
tienCuoc = [0]
class InputBox:
def __init__(self, x, y, w, h, text= '0' ):
global tienCuoc
self.rect = pygame.Rect(x, y, w, h)
self.color = pygame.Color((0, 0, 0))
self.text = text
self.txt_surface = FONT.render(text, True, self.color)
self.active = False
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
# If the user clicked on the input_box rect.
if self.rect.collidepoint(event.pos):
# Toggle the active variable.
self.active = not self.active
else:
self.active = False
# Change the current color of the input box.
self.color = pygame.Color((255, 255, 255)) if self.active else pygame.Color((0, 0, 0))
if event.type == pygame.KEYDOWN:
if self.active:
if event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.text += event.unicode
# Re-render the text.
self.txt_surface = FONT.render(self.text, True, self.color)
def update(self):
# Resize the box if the text is too long.
width = max(200, self.txt_surface.get_width()+10)
self.rect.w = width
def draw(self, screen):
# Blit the text.
DISPLAYSURF.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))
# Blit the rect.
pygame.draw.rect(DISPLAYSURF, self.color, self.rect, 2)
def HamGiaoDienBetting(set):
global chon_xe
chon_xe = [1]
global car_1, car_2, car_3, car_4, car_5, tienCuoc
if (set == 1):
car_1 = pygame.image.load("img/Set Xe/16.png")
car_2 = pygame.image.load("img/Set Xe/17.png")
car_3 = pygame.image.load("img/Set Xe/18.png")
car_4 = pygame.image.load("img/Set Xe/19.png")
car_5 = pygame.image.load("img/Set Xe/20.png")
elif (set == 2):
car_1 = pygame.image.load("img/Set Xe/15.png")
car_2 = pygame.image.load("img/Set Xe/13.png")
car_3 = pygame.image.load("img/Set Xe/11.png")
car_4 = pygame.image.load("img/Set Xe/12.png")
car_5 = pygame.image.load("img/Set Xe/14.png")
elif (set == 3):
car_1 = pygame.image.load("img/Set Xe/10.png")
car_2 = pygame.image.load("img/Set Xe/7.png")
car_3 = pygame.image.load("img/Set Xe/6.png")
car_4 = pygame.image.load("img/Set Xe/8.png")
car_5 = pygame.image.load("img/Set Xe/9.png")
elif (set == 4):
car_1 = pygame.image.load("img/Set Xe/5.png")
car_2 = pygame.image.load("img/Set Xe/3.png")
car_3 = pygame.image.load("img/Set Xe/1.png")
car_4 = pygame.image.load("img/Set Xe/2.png")
car_5 = pygame.image.load("img/Set Xe/4.png")
elif (set == 5):
car_1 = pygame.image.load("img/Set Xe/21.png")
car_2 = pygame.image.load("img/Set Xe/22.png")
car_3 = pygame.image.load("img/Set Xe/23.png")
car_4 = pygame.image.load("img/Set Xe/24.png")
car_5 = pygame.image.load("img/Set Xe/25.png")
b.append(car_1)
b.append(car_2)
b.append(car_3)
b.append(car_4)
b.append(car_5)
Nut1 = False
Nut2 = False
Nut3 = False
Nut4 = False
Nut5 = False
clock = pygame.time.Clock()
input_box = InputBox(680, 458, 140, 42) # Khai bao cai hop
while True:
mouse_x, mouse_y = pygame.mouse.get_pos()
DISPLAYSURF.blit(BG_Betting, (0, 0))
draw_text(" Enter your stake: ", "font/monofonto.ttf", 38, (255, 255, 255), 680, 453, "topright")
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
input_box.handle_event(event)
# Go back
if event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
return
if event.type == pygame.MOUSEBUTTONDOWN:
global choose_1, choose_2, choose_3, choose_4, choose_5
# Go back
if (event.button == 1) & (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
return
# Select car 1
if (event.button == 1) & (mouse_x >= 334) & (mouse_x <= 396) & (mouse_y >= 347) & (mouse_y <= 407):
Nut1 = True
Nut2 = False
Nut3 = False
Nut4 = False
Nut5 = False
chon_xe[0] = 1
# Select car 2
if (event.button == 1) & (mouse_x >= 471) & (mouse_x <= 532) & (mouse_y >= 347) & (mouse_y <= 407):
Nut1 = False
Nut2 = True
Nut3 = False
Nut4 = False
Nut5 = False
chon_xe[0] = 2
# Select car 3
if (event.button == 1) & (mouse_x >= 606) & (mouse_x <= 668) & (mouse_y >= 347) & (mouse_y <= 407):
Nut1 = False
Nut2 = False
Nut3 = True
Nut4 = False
Nut5 = False
chon_xe[0] = 3
# Select car 4
if (event.button == 1) & (mouse_x >= 751) & (mouse_x <= 810) & (mouse_y >= 347) & (mouse_y <= 407):
Nut1 = False
Nut2 = False
Nut3 = False
Nut4 = True
Nut5 = False
chon_xe[0] = 4
if (event.button == 1) & (mouse_x >= 888) & (mouse_x <= 950) & (mouse_y >= 347) & (mouse_y <= 407):
Nut1 = False
Nut2 = False
Nut3 = False
Nut4 = False
Nut5 = True
chon_xe[0] = 5
if tienCuoc[0] == '':
print(tienCuoc[0])
elif (int(tienCuoc[0]) <= int(coin[0])) & (int(tienCuoc[0]) > 0) & (event.button == 1) & (mouse_x >= 570) & (mouse_x <= 754) & (mouse_y >= 540) & (mouse_y <= 607):
start_the_game()
# draw the selected character set
if set == 1:
DISPLAYSURF.blit(pygame.image.load("img/Set 1.png"), (0, 0))
if set == 2:
DISPLAYSURF.blit(pygame.image.load("img/Set 2.png"), (0, 0))
if set == 3:
DISPLAYSURF.blit(pygame.image.load("img/Set 3.png"), (0, 0))
if set == 4:
DISPLAYSURF.blit(pygame.image.load("img/Set 4.png"), (0, 0))
if set == 5:
DISPLAYSURF.blit(pygame.image.load("img/Set 5.png"), (0, 0))
input_box.update()
# Selection highlight
if Nut1 == True:
DISPLAYSURF.blit(pygame.image.load("img/NutTick1.png"), (0, 0))
elif Nut2 == True:
DISPLAYSURF.blit(pygame.image.load("img/NutTick2.png"), (0, 0))
elif Nut3 == True:
DISPLAYSURF.blit(pygame.image.load("img/NutTick3.png"), (0, 0))
elif Nut4 == True:
DISPLAYSURF.blit(pygame.image.load("img/NutTick4.png"), (0, 0))
elif Nut5 == True:
DISPLAYSURF.blit(pygame.image.load("img/NutTick5.png"), (0, 0))
# Button hover effects
if (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
DISPLAYSURF.blit(pygame.image.load("img/NutBack.png"), (0, 0))
if (mouse_x >= 334) & (mouse_x <= 396) & (mouse_y >= 347) & (mouse_y <= 407):
DISPLAYSURF.blit(pygame.image.load("img/NutTick1.png"), (0, 0))
if (mouse_x >= 471) & (mouse_x <= 532) & (mouse_y >= 347) & (mouse_y <= 407):
DISPLAYSURF.blit(pygame.image.load("img/NutTick2.png"), (0, 0))
if (mouse_x >= 606) & (mouse_x <= 668) & (mouse_y >= 347) & (mouse_y <= 407):
DISPLAYSURF.blit(pygame.image.load("img/NutTick3.png"), (0, 0))
if (mouse_x >= 751) & (mouse_x <= 810) & (mouse_y >= 347) & (mouse_y <= 407):
DISPLAYSURF.blit(pygame.image.load("img/NutTick4.png"), (0, 0))
if (mouse_x >= 888) & (mouse_x <= 950) & (mouse_y >= 347) & (mouse_y <= 407):
DISPLAYSURF.blit(pygame.image.load("img/NutTick5.png"), (0, 0))
if (mouse_x >= 570) & (mouse_x <= 754) & (mouse_y >= 540) & (mouse_y <= 607):
DISPLAYSURF.blit(pygame.image.load("img/NutStart.png"), (0, 0))
input_box.draw(DISPLAYSURF)
tienCuoc[0] = input_box.text
drawCoin()
        pygame.display.update()
clock.tick(30)
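# --- Illustrative helper (not called by the screen above) ---
# The betting screen tests every button with hard-coded coordinate ranges. A
# pygame.Rect expresses the same check more readably; the numbers below are only
# an example reusing the Back button bounds (x 1173-1259, y 32-112).
def point_in_rect(x, y, left, top, right, bottom):
    # Rect takes (left, top, width, height); collidepoint does the range test,
    # excluding the right/bottom edge, so it is only approximately equivalent
    # to the inclusive ">= / <=" checks used above.
    return pygame.Rect(left, top, right - left, bottom - top).collidepoint(x, y)
# Example: point_in_rect(mouse_x, mouse_y, 1173, 32, 1259, 112)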
#############################################################################
class Back_ground():
def __init__(self):
self.x = 0
self.y = 0
self.img = map
#self.width = self.img.get_width()
#self.height = self.img.get_height()
def draw(self):
DISPLAYSURF.blit(self.img, (int(self.x), int(self.y)))
#DISPLAYSURF.blit(self.img, (int(self.x), int(self.y - self.height)))
def count_321(self):
count = 3
while count >= 0:
DISPLAYSURF.blit(self.img, (int(self.x), int(self.y)))
if count == 0:
message_display("GO!", 100, -70, (0, 255, 255), 1)
elif count == 3:
message_display(str(count), 100, -70, (255,0,0), 0.75)
elif count == 2:
message_display(str(count), 100, -70, (255, 255, 0), 0.75)
elif count == 1:
message_display(str(count), 100, -70, (0, 255, 0), 0.75)
count -= 1
fpsClock.tick(FPS)
def text_objects(text, font, color):
textSurface = font.render(text, True, color)
return textSurface, textSurface.get_rect()
def message_display(text, shift_x, shift_y, color, sleep_time):
largeText = pygame.font.SysFont('comicsansms', 72, True)
TextSurf, TextRect = text_objects(text, largeText, color)
TextRect.center = ((WINDOWWIDTH / 2 - shift_x), (WINDOWHEIGHT / 2 - shift_y))
DISPLAYSURF.blit(TextSurf, TextRect)
pygame.display.update()
time.sleep(sleep_time)
#############################################################################3
def HamGiaoDienHelp():
while True:
mouse_x, mouse_y = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
                # Back button
if (event.button == 1) & (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
return
DISPLAYSURF.blit(pygame.image.load("img/GiaodienHelp.png"), (0, 0))
if (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
DISPLAYSURF.blit(pygame.image.load("img/NutBack.png"), (0, 0))
pygame.display.update()
def HamGiaoDienShop():
while True:
mouse_x, mouse_y = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
                # Back button
if (event.button == 1) & (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
return
DISPLAYSURF.blit(pygame.image.load("img/GiaoDienShop.png"), (0, 0))
if (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
DISPLAYSURF.blit(pygame.image.load("img/NutBack.png"), (0, 0))
pygame.display.update()
def HamGiaoDienPlay():
global map,car1,car2,car3,car4,car5
while True:
mouse_x, mouse_y = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
            # Back (Escape key)
if event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
return
if event.type == pygame.MOUSEBUTTONDOWN:
                # Back button
if (event.button == 1) & (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
return
                # choose a map
if (event.button == 1) & (mouse_x >= 0) & (mouse_x <= 415) & (mouse_y >= 460) & (mouse_y <= 690):
map=pygame.image.load("img/Map1.png")
HamGiaoDienSetNV()
if (event.button == 1) & (mouse_x >= 0) & (mouse_x <= 415) & (mouse_y >= 158) & (mouse_y <= 392):
map=pygame.image.load("img/Map5.png")
HamGiaoDienSetNV()
if (event.button == 1) & (mouse_x >= 428) & (mouse_x <= 847) & (mouse_y >= 289) & (mouse_y <= 527):
map=pygame.image.load("img/Map3.png")
HamGiaoDienSetNV()
if (event.button == 1) & (mouse_x >= 858) & (mouse_x <= 1280) & (mouse_y >= 151) & (mouse_y <= 392):
map=pygame.image.load("img/Map4.png")
HamGiaoDienSetNV()
if (event.button == 1) & (mouse_x >= 858) & (mouse_x <= 1280) & (mouse_y >= 455) & (mouse_y <= 720):
map=pygame.image.load("img/Map2.png")
HamGiaoDienSetNV()
        DISPLAYSURF.blit(BG_PLAY_IMG, (0, 0))  # background shown after pressing Play
        # button hover effects below
if (mouse_x >= 1173) & (mouse_x <= 1259) & (mouse_y >= 32) & (mouse_y <= 112):
DISPLAYSURF.blit(pygame.image.load("img/NutBack.png"), (0, 0))
if (mouse_x >= 0) & (mouse_x <= 415) & (mouse_y >= 460) & (mouse_y <= 690):
DISPLAYSURF.blit(pygame.image.load("img/NutChonseMap1.png"), (0, 0))
if (mouse_x >= 0) & (mouse_x <= 415) & (mouse_y >= 158) & (mouse_y <= 392):
DISPLAYSURF.blit(pygame.image.load("img/NutChoseMap5.png"), (0, 0))
if (mouse_x >= 428) & (mouse_x <= 847) & (mouse_y >= 289) & (mouse_y <= 527):
DISPLAYSURF.blit(pygame.image.load("img/NutChoseMap3.png"), (0, 0))
if (mouse_x >= 858) & (mouse_x <= 1280) & (mouse_y >= 151) & (mouse_y <= 392):
DISPLAYSURF.blit(pygame.image.load("img/NutChoseMap4.png"), (0, 0))
if (mouse_x >= 858) & (mouse_x <= 1280) & (mouse_y >= 455) & (mouse_y <= 720):
DISPLAYSURF.blit(pygame.image.load("img/NutChoseMap2.png"), (0, 0))
pygame.display.update()
def MeNu():
    menu_sound.play(-1)  # start the menu music on loop
global coin
while True:
mouse_x, mouse_y = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
                if (event.button == 1) & (coin[0] > 0) & (mouse_x >= 506) & (mouse_x <= 817) & (mouse_y >= 210) & (mouse_y <= 294):  # open the Play screen
HamGiaoDienPlay()
if (event.button == 1) & (mouse_x >= 506) & (mouse_x <= 817) & (mouse_y >= 334) &( mouse_y <= 420):
HamGiaoDienShop()
if (event.button == 1) & (coin[0] == 0) & (mouse_x >= 506) & (mouse_x <= 817) & (mouse_y >= 454) &( mouse_y <= 537):
print("MiniGame")
menu_sound.stop()
minigame_sound.play()
start_game(coin)
file_2 = open(coin_username_info, 'w')
file_2.write(str(coin[0]))
file_2.close()
minigame_sound.stop()
menu_sound.play(-1)
if (event.button == 1) & (mouse_x >= 610) & (mouse_x <= 717) & (mouse_y >= 576) &( mouse_y <= 670):
HamGiaoDienHelp()
DISPLAYSURF.blit(BG_MENU_IMG, (0, 0))
drawCoin()
        # button hover effects
if (mouse_x >= 506) & (mouse_x <= 817) & (mouse_y >= 210) & (mouse_y <= 294):
DISPLAYSURF.blit(NutPlay, (0, 0))
if (mouse_x >= 506) & (mouse_x <= 817) & (mouse_y >= 334) & (mouse_y <= 420):
DISPLAYSURF.blit(NutShop, (0, 0))
if (mouse_x >= 506) & (mouse_x <= 817) & (mouse_y >= 454) & (mouse_y <= 537):
DISPLAYSURF.blit(NutMiniGame, (0, 0))
if (mouse_x >= 610) & (mouse_x <= 717) & (mouse_y >= 576) & (mouse_y <= 670):
DISPLAYSURF.blit(NutHelp, (0, 0))
pygame.display.update()
def main():
MeNu()
#####################################
from tkinter import *
import os
def delete2():
screen3.destroy()
def delete3():
screen4.destroy()
def delete4():
screen5.destroy()
def login_sucess():
global screen3
screen3 = Toplevel(screen)
screen3.title("Success")
screen3.geometry("150x100")
Label(screen3, text="Login Sucess").pack()
Button(screen3, text="OK", command=delete2).pack()
def password_not_recognised():
global screen4
screen4 = Toplevel(screen)
screen4.title("Success")
screen4.geometry("150x100")
Label(screen4, text="Password Error").pack()
Button(screen4, text="OK", command=delete3).pack()
def user_not_found():
global screen5
screen5 = Toplevel(screen)
screen5.title("Success")
screen5.geometry("150x100")
Label(screen5, text="User Not Found").pack()
Button(screen5, text="OK", command=delete4).pack()
def register_user():
print("working")
global username_info
username_info = username.get()
password_info = password.get()
file = open(username_info, "w")
file.write(username_info + "\n")
file.write(password_info)
file.close()
global coin_username_info
coin_username_info =username_info+"_coin"
file_1 = open(coin_username_info, "w")
    file_1.write(str(0))
    file_1.close()
username_entry.delete(0, END)
password_entry.delete(0, END)
Label(screen1, text="Registration Sucess", fg="green", font=("calibri", 11)).pack()
def login_verify():
username1 = username_verify.get()
password1 = password_verify.get()
username_entry1.delete(0, END)
password_entry1.delete(0, END)
list_of_files = os.listdir()
if username1 in list_of_files:
file1 = open(username1, "r")
verify = file1.read().splitlines()
global coin_username_info
coin_username_info=username1+"_coin"
if password1 in verify:
global coin
            coin = []
            file_1 = open(coin_username_info, 'r')
            n = file_1.read()
            file_1.close()
            coin.append(int(n))
if __name__ == '__main__':
main()
else:
password_not_recognised()
else:
user_not_found()
def register():
global screen1
screen1 = Toplevel(screen)
screen1.title("Register")
screen1.geometry("300x250")
global username
global password
global username_entry
global password_entry
username = StringVar()
password = StringVar()
Label(screen1, text="Please enter details below").pack()
Label(screen1, text="").pack()
Label(screen1, text="Username * ").pack()
username_entry = Entry(screen1, textvariable=username)
username_entry.pack()
Label(screen1, text="Password * ").pack()
password_entry = Entry(screen1, textvariable=password)
password_entry.pack()
Label(screen1, text="").pack()
Button(screen1, text="Register", width=10, height=1, command=register_user).pack()
def login():
global screen2
screen2 = Toplevel(screen)
screen2.title("Login")
screen2.geometry("300x250")
Label(screen2, text="Please enter details below to login").pack()
Label(screen2, text="").pack()
global username_verify
global password_verify
username_verify = StringVar()
password_verify = StringVar()
global username_entry1
global password_entry1
Label(screen2, text="Username * ").pack()
username_entry1 = Entry(screen2, textvariable=username_verify)
username_entry1.pack()
Label(screen2, text="").pack()
Label(screen2, text="Password * ").pack()
password_entry1 = Entry(screen2, textvariable=password_verify)
password_entry1.pack()
Label(screen2, text="").pack()
Button(screen2, text="Login", width=10, height=1, command=login_verify).pack()
def main_screen():
global screen
screen = Tk()
screen.geometry("300x250")
screen.title("Notes 1.0")
Label(text="Notes 1.0", bg="grey", width="300", height="2", font=("Calibri", 13)).pack()
Label(text="").pack()
Button(text="Login", height="2", width="30", command=login).pack()
Label(text="").pack()
Button(text="Register", height="2", width="30", command=register).pack()
screen.mainloop()
main_screen()
'''
Group 3
Game: Ca cuoc dua xe (car racing betting)
Supervisor (GVHD): VÕ HOÀNG QUÂN
33
Students:
Dương Minh Hiếu _ 20120473 (Dev, QA)
Trần Lê Hiếu _ 20120479 (Dev, Tester)
Nguyễn Tạ Huy Hoàng _ 20120482 (Dev, BA)
Ngô Phi Hùng _ 20120486 (Dev, PM)
Đỗ Đăng Huy _ 20120492 (Dev, Designer)
'''
|
the-stack_106_30224 | import time
import adafruit_lis3dh
import board
import busio
import neopixel
# This is derived from: https://github.com/adafruit/Adafruit_CircuitPython_LIS3DH/blob/master/examples/spinner.py
def flash(c):
pixels.fill((0, 0, 0))
for i in range(3):
pixels.fill(c)
pixels.show()
time.sleep(0.1)
pixels.fill((0, 0, 0))
pixels.show()
time.sleep(0.1)
# Accelerometer tap threshold. Higher values mean you need to tap harder to start a spin.
TAP_THRESHOLD = 20
# Initialize NeoPixels
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, auto_write=False)
pixels.fill((0, 0, 0))
pixels.show()
# Initialize accelerometer
i2c = busio.I2C(board.ACCELEROMETER_SCL, board.ACCELEROMETER_SDA)
lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=25)
# Set accelerometer range.
lis3dh.range = adafruit_lis3dh.RANGE_16_G
# Enable single click detection, but use a custom CLICK_CFG register value
# to only detect clicks on the X axis (instead of all 3 X, Y, Z axes).
lis3dh.set_click(1, TAP_THRESHOLD, click_cfg=0x01)
flash((20, 0, 0))
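# --- Illustrative helper (not used by the loop below) ---
# The loop below compares the raw CLICK_SRC byte against whole bit patterns such
# as 0b1010001. This optional sketch names the individual bits instead; the bit
# positions are the LIS3DH CLICK_SRC layout assumed by those patterns
# (bit 6 = interrupt active, bit 3 = sign/negative, bit 0 = X axis).
def decode_click(raw):
    return {
        "active": bool(raw & 0b01000000),    # a click event was latched
        "negative": bool(raw & 0b00001000),  # acceleration sign bit
        "x_axis": bool(raw & 0b00000001),    # click detected on the X axis
    }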
while True:
# Read the raw click detection register value and check if there was a click detected.
clicksrc = lis3dh.read_click_raw()
if clicksrc & 0b01000000 > 0:
# Check if this was a positive or negative click event.
if clicksrc == 0b1010001: # Positive click
print("Positive click")
flash((0, 0, 20))
elif clicksrc == 0b1011001: # Negative click
print("Negative click")
            flash((0, 20, 0))
# Small delay to stay responsive but give time for interrupt processing.
time.sleep(0.05)
|
the-stack_106_30226 | from __future__ import unicode_literals
from datetime import timedelta
import logging
from django.conf import settings
from django.db import transaction
from django.utils import timezone
from waldur_core.core import tasks as core_tasks, utils as core_utils
from waldur_core.quotas import exceptions as quotas_exceptions
from waldur_core.structure import (models as structure_models, tasks as structure_tasks,
SupportedServices)
from . import models, serializers, log
logger = logging.getLogger(__name__)
class SetInstanceOKTask(core_tasks.StateTransitionTask):
""" Additionally mark or related floating IPs as free """
def pre_execute(self, instance):
self.kwargs['state_transition'] = 'set_ok'
self.kwargs['action'] = ''
self.kwargs['action_details'] = {}
super(SetInstanceOKTask, self).pre_execute(instance)
def execute(self, instance, *args, **kwargs):
super(SetInstanceOKTask, self).execute(instance)
instance.floating_ips.update(is_booked=False)
class SetInstanceErredTask(core_tasks.ErrorStateTransitionTask):
""" Mark instance as erred and delete resources that were not created. """
def execute(self, instance):
super(SetInstanceErredTask, self).execute(instance)
# delete volumes if they were not created on backend,
# mark as erred if creation was started, but not ended,
# leave as is, if they are OK.
for volume in instance.volumes.all():
if volume.state == models.Volume.States.CREATION_SCHEDULED:
volume.delete()
elif volume.state == models.Volume.States.OK:
pass
else:
volume.set_erred()
volume.save(update_fields=['state'])
# set instance floating IPs as free, delete not created ones.
instance.floating_ips.filter(backend_id='').delete()
instance.floating_ips.update(is_booked=False)
class SetBackupErredTask(core_tasks.ErrorStateTransitionTask):
""" Mark DR backup and all related resources that are not in state OK as Erred """
def execute(self, backup):
super(SetBackupErredTask, self).execute(backup)
for snapshot in backup.snapshots.all():
# If snapshot creation was not started - delete it from NC DB.
if snapshot.state == models.Snapshot.States.CREATION_SCHEDULED:
snapshot.decrease_backend_quotas_usage()
snapshot.delete()
else:
snapshot.set_erred()
snapshot.save(update_fields=['state'])
# Deactivate schedule if its backup become erred.
schedule = backup.backup_schedule
if schedule:
schedule.error_message = 'Failed to execute backup schedule for %s. Error: %s' % (
backup.instance, backup.error_message)
schedule.is_active = False
schedule.save()
class ForceDeleteBackupTask(core_tasks.DeletionTask):
def execute(self, backup):
backup.snapshots.all().delete()
super(ForceDeleteBackupTask, self).execute(backup)
class VolumeExtendErredTask(core_tasks.ErrorStateTransitionTask):
""" Mark volume and its instance as erred on fail """
def execute(self, volume):
super(VolumeExtendErredTask, self).execute(volume)
if volume.instance is not None:
super(VolumeExtendErredTask, self).execute(volume.instance)
class BaseScheduleTask(core_tasks.BackgroundTask):
model = NotImplemented
resource_attribute = NotImplemented
def is_equal(self, other_task):
return self.name == other_task.get('name')
def run(self):
schedules = self.model.objects.filter(is_active=True, next_trigger_at__lt=timezone.now())
for schedule in schedules:
existing_resources = self._get_number_of_resources(schedule)
if existing_resources > schedule.maximal_number_of_resources:
self._remove_exceeding_backups(schedule, existing_resources)
continue
elif existing_resources == schedule.maximal_number_of_resources:
continue
kept_until = None
if schedule.retention_time:
kept_until = timezone.now() + timezone.timedelta(days=schedule.retention_time)
try:
with transaction.atomic():
schedule.call_count += 1
schedule.save()
resource = self._create_resource(schedule, kept_until=kept_until)
except quotas_exceptions.QuotaValidationError as e:
message = 'Failed to schedule "%s" creation. Error: %s' % (self.model.__name__, e)
logger.exception(
'Resource schedule (PK: %s), (Name: %s) execution failed. %s' % (schedule.pk,
schedule.name,
message))
schedule.is_active = False
schedule.error_message = message
schedule.save()
else:
executor = self._get_create_executor()
executor.execute(resource)
schedule.update_next_trigger_at()
schedule.save()
def _remove_exceeding_backups(self, schedule, resources_count):
amount_to_remove = resources_count - schedule.maximal_number_of_resources
self._log_backup_cleanup(schedule, amount_to_remove, resources_count)
resources = getattr(schedule, self.resource_attribute)
resources_to_remove = resources.order_by('kept_until')[:amount_to_remove]
resources.filter(id__in=resources_to_remove).delete()
def _log_backup_cleanup(self, schedule, amount_to_remove, resources_count):
raise NotImplementedError()
def _create_resource(self, schedule, kept_until):
raise NotImplementedError()
def _get_create_executor(self):
raise NotImplementedError()
def _get_number_of_resources(self, schedule):
resources = getattr(schedule, self.resource_attribute)
return resources.count()
class ScheduleBackups(BaseScheduleTask):
name = 'openstack_tenant.ScheduleBackups'
model = models.BackupSchedule
resource_attribute = 'backups'
def _create_resource(self, schedule, kept_until):
backup = models.Backup.objects.create(
name='Backup#%s of %s' % (schedule.call_count, schedule.instance.name),
description='Scheduled backup of instance "%s"' % schedule.instance,
service_project_link=schedule.instance.service_project_link,
instance=schedule.instance,
backup_schedule=schedule,
metadata=serializers.BackupSerializer.get_backup_metadata(schedule.instance),
kept_until=kept_until,
)
serializers.BackupSerializer.create_backup_snapshots(backup)
return backup
def _get_create_executor(self):
from . import executors
return executors.BackupCreateExecutor
def _log_backup_cleanup(self, schedule, amount_to_remove, resources_count):
message_template = ('Maximum resource count "%s" has been reached.'
'"%s" from "%s" resources are going to be removed.')
log.event_logger.openstack_backup_schedule.info(
message_template % (schedule.maximal_number_of_resources, amount_to_remove, resources_count),
event_type='resource_backup_schedule_cleaned_up',
event_context={'resource': schedule.instance, 'backup_schedule': schedule},
)
class DeleteExpiredBackups(core_tasks.BackgroundTask):
name = 'openstack_tenant.DeleteExpiredBackups'
def is_equal(self, other_task):
return self.name == other_task.get('name')
def run(self):
from . import executors
for backup in models.Backup.objects.filter(kept_until__lt=timezone.now(), state=models.Backup.States.OK):
executors.BackupDeleteExecutor.execute(backup)
class ScheduleSnapshots(BaseScheduleTask):
name = 'openstack_tenant.ScheduleSnapshots'
model = models.SnapshotSchedule
resource_attribute = 'snapshots'
def _create_resource(self, schedule, kept_until):
snapshot = models.Snapshot.objects.create(
name='Snapshot#%s of %s' % (schedule.call_count, schedule.source_volume.name),
description='Scheduled snapshot of volume "%s"' % schedule.source_volume,
service_project_link=schedule.source_volume.service_project_link,
source_volume=schedule.source_volume,
snapshot_schedule=schedule,
size=schedule.source_volume.size,
metadata=serializers.SnapshotSerializer.get_snapshot_metadata(schedule.source_volume),
kept_until=kept_until,
)
snapshot.increase_backend_quotas_usage()
return snapshot
def _get_create_executor(self):
from . import executors
return executors.SnapshotCreateExecutor
def _log_backup_cleanup(self, schedule, amount_to_remove, resources_count):
message_template = ('Maximum resource count "%s" has been reached.'
'"%s" from "%s" resources are going to be removed.')
log.event_logger.openstack_snapshot_schedule.info(
message_template % (schedule.maximal_number_of_resources, amount_to_remove, resources_count),
event_type='resource_snapshot_schedule_cleaned_up',
event_context={'resource': schedule.source_volume, 'snapshot_schedule': schedule},
)
class DeleteExpiredSnapshots(core_tasks.BackgroundTask):
name = 'openstack_tenant.DeleteExpiredSnapshots'
def is_equal(self, other_task):
return self.name == other_task.get('name')
def run(self):
from . import executors
for snapshot in models.Snapshot.objects.filter(kept_until__lt=timezone.now(), state=models.Snapshot.States.OK):
executors.SnapshotDeleteExecutor.execute(snapshot)
class SetErredStuckResources(core_tasks.BackgroundTask):
name = 'openstack_tenant.SetErredStuckResources'
def is_equal(self, other_task):
return self.name == other_task.get('name')
def run(self):
for model in (models.Instance, models.Volume, models.Snapshot):
cutoff = timezone.now() - timedelta(minutes=30)
for resource in model.objects.filter(modified__lt=cutoff,
state=structure_models.NewResource.States.CREATING):
resource.set_erred()
resource.error_message = 'Provisioning is timed out.'
resource.save(update_fields=['state', 'error_message'])
logger.warning('Switching resource %s to erred state, '
'because provisioning is timed out.',
core_utils.serialize_instance(resource))
class LimitedPerTypeThrottleMixin(object):
def get_limit(self, resource):
nc_settings = getattr(settings, 'WALDUR_OPENSTACK', {})
limit_per_type = nc_settings.get('MAX_CONCURRENT_PROVISION', {})
model_name = SupportedServices.get_name_for_model(resource)
return limit_per_type.get(model_name, super(LimitedPerTypeThrottleMixin, self).get_limit(resource))
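    # Illustrative (assumed) settings shape for the lookup above; the inner keys
    # are SupportedServices model names, so a configuration could look like:
    #   WALDUR_OPENSTACK = {'MAX_CONCURRENT_PROVISION': {'OpenStackTenant.Instance': 4}}
    # Models without an explicit entry fall back to the parent class limit.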
class ThrottleProvisionTask(LimitedPerTypeThrottleMixin, structure_tasks.ThrottleProvisionTask):
pass
class ThrottleProvisionStateTask(LimitedPerTypeThrottleMixin, structure_tasks.ThrottleProvisionStateTask):
pass
|
the-stack_106_30227 | import subprocess
import logging as logger
from . import const, utils
def get_command_with_options(command, aliases, exec_params):
"""
Find command by aliases and build exec docker options
"""
if command[0] in aliases:
key = command[0]
command = aliases[key]['command'] + list(command[1:])
command_exec_params = aliases[key].get('exec', {})
exec_params = utils.merge_config(command_exec_params, exec_params)
dotenv_env = (
utils.load_dotenv(exec_params['env_file'])
if 'env_file' in exec_params and exec_params['env_file'] else {}
)
exec_options = exec_params.get('options', {})
exec_options['env'] = utils.composit_environment(
dotenv_env=dotenv_env,
zenvfile_env=exec_options.get('env', {}),
blacklist=exec_params.get('env_excludes', {})
)
docker_exec_options = utils.build_docker_options(exec_options)
return command, docker_exec_options
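# Illustrative sketch of the inputs this helper expects; the shapes are inferred
# from the lookups above, not taken from project documentation:
#   aliases = {'manage': {'command': ['python', 'manage.py'],
#                         'exec': {'options': {'env': {'DEBUG': '1'}}}}}
#   exec_params = {'options': {}, 'env_excludes': []}
#   get_command_with_options(('manage', 'migrate'), aliases, exec_params)
#   -> (['python', 'manage.py', 'migrate'], <list of docker exec options>)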
def call(config, command):
container_name = config['main']['name']
container_status = status(container_name)
# composite environments
command, exec_options = get_command_with_options(
command, config['aliases'], config['exec']
)
if container_status == const.STATUS_NOT_EXIST:
options = {'name': container_name, **config['run']['options']}
run_command, _ = get_command_with_options(
config['run']['command'], config['aliases'], {})
run(
image=config['main']['image'],
command=run_command,
options=utils.build_docker_options(options),
path=config['main']['zenvfilepath']
)
# run init commands:
for init_command in config['run']['init_commands']:
init_command, init_options = get_command_with_options(
init_command, config['aliases'], {}
)
exec_(container_name, init_command, init_options)
elif container_status == const.STATUS_STOPED:
cmd = ['docker', 'start', container_name]
logger.debug(cmd)
subprocess.run(cmd)
return exec_(container_name, command, exec_options)
def run(image, command, options, path):
cmd = ['docker', 'run', *options, image, *command]
with utils.in_directory(path):
logger.debug(cmd)
result = subprocess.run(cmd)
return result.returncode
def exec_(container_name, command, options):
cmd = ('docker', 'exec', *options, container_name, *command)
logger.debug(cmd)
return subprocess.run(cmd).returncode
def status(container_name):
cmd = (
f"docker ps --all --filter 'name={container_name}' "
"--format='{{.Status}}'"
)
logger.debug(cmd)
result = subprocess.run(cmd, shell=True, check=True, capture_output=True)
status = (
result.stdout.decode().split()[0].upper() if result.stdout else None
)
if not status:
return const.STATUS_NOT_EXIST
elif status == 'EXITED':
return const.STATUS_STOPED
elif status == 'UP':
return const.STATUS_RUNNING
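# Note: `docker ps --format '{{.Status}}'` prints strings such as "Up 2 hours" or
# "Exited (0) 3 minutes ago"; taking the first word and upper-casing it therefore
# yields "UP" or "EXITED", which is what the branches above compare against.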
def version():
cmd = 'docker version'
subprocess.run(cmd, shell=True)
def stop(container_name):
cmd = f'docker stop {container_name}'
subprocess.run(cmd, shell=True)
def rm(container_name):
current_status = status(container_name)
if current_status == const.STATUS_RUNNING:
stop(container_name)
if current_status == const.STATUS_NOT_EXIST:
return
cmd = f'docker rm {container_name}'
subprocess.run(cmd, shell=True)
def stop_all(exclude_containers=()):
"""
Stop all containers started with `zenv-`
"""
cmd = (
"docker ps --format='{{.Names}}'"
)
result = subprocess.run(cmd, shell=True, check=True, capture_output=True)
for container_name in result.stdout.decode().split('\n'):
if (
container_name.startswith(const.CONTAINER_PREFIX + '-')
and container_name not in exclude_containers
):
stop(container_name)
|
the-stack_106_30228 | #!/usr/bin/env python
from common.realtime import sec_since_boot
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import EventTypes as ET, create_event
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.toyota.carstate import CarState, get_can_parser, get_cam_can_parser
from selfdrive.car.toyota.values import ECU, check_ecu_msgs, CAR, NO_STOP_TIMER_CAR
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness
from selfdrive.swaglog import cloudlog
class CarInterface(object):
def __init__(self, CP, CarController):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.gas_pressed_prev = False
self.brake_pressed_prev = False
self.cruise_enabled_prev = False
# *** init the major players ***
self.CS = CarState(CP)
self.cp = get_can_parser(CP)
self.cp_cam = get_cam_can_parser(CP)
self.forwarding_camera = False
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP.carFingerprint, CP.enableCamera, CP.enableDsu, CP.enableApgs)
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
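  # Worked example: a longitudinal acceleration request of 1.5 m/s^2 maps to a
  # normalized gas/brake command of 1.5 / 3.0 = 0.5.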
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.0
@staticmethod
def get_params(candidate, fingerprint, vin="", is_panda_black=False):
ret = car.CarParams.new_message()
ret.carName = "toyota"
ret.carFingerprint = candidate
ret.carVin = vin
ret.isPandaBlack = is_panda_black
ret.safetyModel = car.CarParams.SafetyModel.toyota
# pedal
ret.enableCruise = not ret.enableGasInterceptor
ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
if candidate not in [CAR.PRIUS, CAR.RAV4, CAR.RAV4H]: # These cars use LQR/INDI
ret.lateralTuning.init('pid')
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
if candidate == CAR.PRIUS:
stop_and_go = True
ret.safetyParam = 66 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 15.74 # unknown end-to-end spec
tire_stiffness_factor = 0.6371 # hand-tune
ret.mass = 3045. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.0
ret.lateralTuning.indi.outerLoopGain = 3.0
ret.lateralTuning.indi.timeConstant = 1.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.0
# TODO: Determine if this is better than INDI
# ret.lateralTuning.init('lqr')
# ret.lateralTuning.lqr.scale = 1500.0
# ret.lateralTuning.lqr.ki = 0.01
# ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
# ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
# ret.lateralTuning.lqr.c = [1., 0.]
# ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
# ret.lateralTuning.lqr.l = [0.03233671, 0.03185757]
# ret.lateralTuning.lqr.dcGain = 0.002237852961363602
ret.steerActuatorDelay = 0.5
elif candidate in [CAR.RAV4, CAR.RAV4H]:
stop_and_go = True if (candidate in CAR.RAV4H) else False
ret.safetyParam = 73
ret.wheelbase = 2.65
ret.steerRatio = 16.88 # 14.5 is spec end-to-end
tire_stiffness_factor = 0.5533
ret.mass = 3650. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.init('lqr')
ret.lateralTuning.lqr.scale = 1500.0
ret.lateralTuning.lqr.ki = 0.05
ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
ret.lateralTuning.lqr.c = [1., 0.]
ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
ret.lateralTuning.lqr.l = [0.3233671, 0.3185757]
ret.lateralTuning.lqr.dcGain = 0.002237852961363602
elif candidate == CAR.COROLLA:
stop_and_go = False
ret.safetyParam = 100
ret.wheelbase = 2.70
ret.steerRatio = 18.27
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 2860. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
ret.lateralTuning.pid.kf = 0.00003 # full torque for 20 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 16. # 14.8 is spec end-to-end
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4481. * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate in [CAR.CHR, CAR.CHRH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.6
tire_stiffness_factor = 0.7933
ret.mass = 3300. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.723], [0.0428]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.CAMRY, CAR.CAMRY_TSS1, CAR.CAMRY_TSS2, CAR.CAMRYH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.82448
ret.steerRatio = 13.7
tire_stiffness_factor = 0.7933
ret.mass = 3400. * CV.LB_TO_KG + STD_CARGO_KG #mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.HIGHLANDER, CAR.HIGHLANDERH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.78
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4607. * CV.LB_TO_KG + STD_CARGO_KG #mean between normal and hybrid limited
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]] # community tuning
ret.lateralTuning.pid.kf = 0.00012 # community tuning
elif candidate == CAR.AVALON:
stop_and_go = False
ret.safetyParam = 73
ret.wheelbase = 2.82
ret.steerRatio = 14.8 #Found at https://pressroom.toyota.com/releases/2016+avalon+product+specs.download
tire_stiffness_factor = 0.7983
ret.mass = 3505. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.RAV4_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.mass = 3370. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.COROLLA_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.9
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3060. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.LEXUS_ESH_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.8702
ret.steerRatio = 16.0 # not optimized
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3704. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.SIENNA:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 3.03
ret.steerRatio = 16.0
tire_stiffness_factor = 0.444
ret.mass = 4590. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007818594
ret.steerRateCost = 1.
ret.centerToFront = ret.wheelbase * 0.44
#detect the Pedal address
ret.enableGasInterceptor = 0x201 in fingerprint
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 19. * CV.MPH_TO_MS
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
# no rear steering, at least on the listed cars above
ret.steerRatioRear = 0.
ret.steerControlType = car.CarParams.SteerControlType.torque
# steer, gas, brake limitations VS speed
ret.steerMaxBP = [16. * CV.KPH_TO_MS, 45. * CV.KPH_TO_MS] # breakpoints at 1 and 40 kph
ret.steerMaxV = [1., 1.] # 2/3rd torque allowed above 45 kph
ret.brakeMaxBP = [0.]
ret.brakeMaxV = [1.]
ret.enableCamera = not check_ecu_msgs(fingerprint, ECU.CAM) or is_panda_black
ret.enableDsu = not check_ecu_msgs(fingerprint, ECU.DSU)
ret.enableApgs = False #not check_ecu_msgs(fingerprint, ECU.APGS)
ret.openpilotLongitudinalControl = ret.enableCamera and ret.enableDsu
cloudlog.warn("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warn("ECU DSU Simulated: %r", ret.enableDsu)
cloudlog.warn("ECU APGS Simulated: %r", ret.enableApgs)
cloudlog.warn("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
ret.steerLimitAlert = False
ret.longitudinalTuning.deadzoneBP = [0., 9.]
ret.longitudinalTuning.deadzoneV = [0., .15]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.stoppingControl = False
ret.startAccel = 0.0
if ret.enableGasInterceptor:
ret.gasMaxBP = [0., 9., 35]
ret.gasMaxV = [0.2, 0.5, 0.7]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
ret.gasMaxBP = [0.]
ret.gasMaxV = [0.5]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiV = [0.54, 0.36]
return ret
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(int(sec_since_boot() * 1e9), can_strings)
# run the cam can update for 10s as we just need to know if the camera is alive
if self.frame < 1000:
self.cp_cam.update_strings(int(sec_since_boot() * 1e9), can_strings)
self.CS.update(self.cp)
# create message
ret = car.CarState.new_message()
ret.canValid = self.cp.can_valid
# speeds
ret.vEgo = self.CS.v_ego
ret.vEgoRaw = self.CS.v_ego_raw
ret.aEgo = self.CS.a_ego
ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
ret.standstill = self.CS.standstill
ret.wheelSpeeds.fl = self.CS.v_wheel_fl
ret.wheelSpeeds.fr = self.CS.v_wheel_fr
ret.wheelSpeeds.rl = self.CS.v_wheel_rl
ret.wheelSpeeds.rr = self.CS.v_wheel_rr
# gear shifter
ret.gearShifter = self.CS.gear_shifter
# gas pedal
ret.gas = self.CS.car_gas
if self.CP.enableGasInterceptor:
# use interceptor values to disengage on pedal press
ret.gasPressed = self.CS.pedal_gas > 15
else:
ret.gasPressed = self.CS.pedal_gas > 0
# brake pedal
ret.brake = self.CS.user_brake
ret.brakePressed = self.CS.brake_pressed != 0
ret.brakeLights = self.CS.brake_lights
# steering wheel
ret.steeringAngle = self.CS.angle_steers
ret.steeringRate = self.CS.angle_steers_rate
ret.steeringTorque = self.CS.steer_torque_driver
ret.steeringTorqueEps = self.CS.steer_torque_motor
ret.steeringPressed = self.CS.steer_override
# cruise state
ret.cruiseState.enabled = self.CS.pcm_acc_active
ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
ret.cruiseState.available = bool(self.CS.main_on)
ret.cruiseState.speedOffset = 0.
if self.CP.carFingerprint in NO_STOP_TIMER_CAR or self.CP.enableGasInterceptor:
# ignore standstill in hybrid vehicles, since pcm allows to restart without
# receiving any special command
# also if interceptor is detected
ret.cruiseState.standstill = False
else:
ret.cruiseState.standstill = self.CS.pcm_acc_status == 7
buttonEvents = []
if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'leftBlinker'
be.pressed = self.CS.left_blinker_on != 0
buttonEvents.append(be)
if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'rightBlinker'
be.pressed = self.CS.right_blinker_on != 0
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
ret.leftBlinker = bool(self.CS.left_blinker_on)
ret.rightBlinker = bool(self.CS.right_blinker_on)
ret.doorOpen = not self.CS.door_all_closed
ret.seatbeltUnlatched = not self.CS.seatbelt
ret.genericToggle = self.CS.generic_toggle
# events
events = []
if self.cp_cam.can_valid:
self.forwarding_camera = True
if not ret.gearShifter == 'drive' and self.CP.enableDsu:
events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.doorOpen:
events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.seatbeltUnlatched:
events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if self.CS.esp_disabled and self.CP.enableDsu:
events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not self.CS.main_on and self.CP.enableDsu:
events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gearShifter == 'reverse' and self.CP.enableDsu:
events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.CS.steer_error:
events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
if self.CS.low_speed_lockout and self.CP.enableDsu:
events.append(create_event('lowSpeedLockout', [ET.NO_ENTRY, ET.PERMANENT]))
if ret.vEgo < self.CP.minEnableSpeed and self.CP.enableDsu:
events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
if c.actuators.gas > 0.1:
# some margin on the actuator to not false trigger cancellation while stopping
events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
if ret.vEgo < 0.001:
# while in standstill, send a user alert
events.append(create_event('manualRestart', [ET.WARNING]))
# enable request in prius is simple, as we activate when Toyota is active (rising edge)
if ret.cruiseState.enabled and not self.cruise_enabled_prev:
events.append(create_event('pcmEnable', [ET.ENABLE]))
elif not ret.cruiseState.enabled:
events.append(create_event('pcmDisable', [ET.USER_DISABLE]))
# disable on pedals rising edge or when brake is pressed and speed isn't zero
if (ret.gasPressed and not self.gas_pressed_prev) or \
(ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gasPressed:
events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
ret.events = events
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
self.cruise_enabled_prev = ret.cruiseState.enabled
return ret.as_reader()
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
can_sends = self.CC.update(c.enabled, self.CS, self.frame,
c.actuators, c.cruiseControl.cancel, c.hudControl.visualAlert,
self.forwarding_camera, c.hudControl.leftLaneVisible,
c.hudControl.rightLaneVisible, c.hudControl.leadVisible,
c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return can_sends
|
the-stack_106_30231 | class Solution:
def shortestDistance(self, wordsDict: List[str], word1: str, word2: str) -> int:
"""
makes: [1,5,7,10]
^
coding: [3,9,12]
^
"""
word1_idx = []
word2_idx = []
for idx, word in enumerate(wordsDict):
if word == word1:
word1_idx.append(idx)
elif word == word2:
word2_idx.append(idx)
idx1, idx2 = 0, 0
min_dist = float("inf")
while idx1 < len(word1_idx) or idx2 < len(word2_idx):
num1 = word1_idx[idx1] if idx1 < len(word1_idx) else float("inf")
num2 = word2_idx[idx2] if idx2 < len(word2_idx) else float("inf")
min_dist = min(min_dist, abs(num1-num2))
if num1 < num2:
idx1 += 1
else:
idx2 += 1
        return min_dist
|
the-stack_106_30232 | from muse.util import RandomFactory
from muse.util import TestRunner
def testGenIntN() -> bool:
value = 0
for _ in range(8192):
if RandomFactory.genIntN(0, 0) != 0:
return False
if RandomFactory.genIntN(1, 1) != 1:
return False
value = RandomFactory.genIntN(0, 1)
if value < 0 or value > 1:
return False
value = RandomFactory.genIntN(100, 10000)
if RandomFactory.genIntN(value, value) != value:
return False
if value < 100 or value > 10000:
return False
return True
def testGenEven() -> bool:
for _ in range(8192):
if (RandomFactory.genEven() & 1) != 0:
return False
return True
def testGenOdd() -> bool:
for _ in range(8192):
if (RandomFactory.genOdd() & 1) == 0:
return False
return True
def main() -> None:
TestRunner.pick(testGenIntN)
TestRunner.pick(testGenEven)
TestRunner.pick(testGenOdd)
if __name__ == "__main__":
main()
|
the-stack_106_30234 | from setuptools import setup
from setuptools import find_namespace_packages
# Open the README file.
with open(file="README.md", mode="r") as fh:
long_description = fh.read()
setup(
name='federal-reserve-python-api',
# Define Author Info.
author='Alex Reed',
author_email='[email protected]',
# Define Version Info.
version='0.1.0',
# Define descriptions.
description='A Python application used to pull data from the US Federal Reserve.',
long_description=long_description,
long_description_content_type="text/markdown",
# Define repo location.
url='https://github.com/areed1192/us-federal-reserve-python-api',
# Define dependencies.
install_requires=[
'requests==2.24.0'
],
# Specify folder content.
packages=find_namespace_packages(
include=['fred']
),
# Define the python version.
python_requires='>3.6',
# Define our classifiers.
classifiers=[
# Phase of development my library is in.
'Development Status :: 3 - Alpha',
# Audience this library is intended for.
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Financial and Insurance Industry',
# License that guides my library.
'License :: OSI Approved :: MIT License',
# Package was written in English.
'Natural Language :: English',
# Operating systems.
'Operating System :: OS Independent',
# Programming Languages Used..
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Topics.
'Topic :: Database',
'Topic :: Education',
'Topic :: Office/Business'
]
)
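# A typical local build/install flow for this setup script (standard setuptools
# usage; adjust to your own tooling):
#   python setup.py sdist bdist_wheel   # build source and wheel distributions
#   pip install .                       # or install directly from the repo root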
|
the-stack_106_30235 | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
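# Small worked examples for the two helpers above:
#   bitwise_dot('111', '101') -> '0'    (1*1 + 1*0 + 1*1 = 2, and 2 mod 2 = 0)
#   bitwise_xor('110', '010') -> '001'  (per-bit XOR gives 1,0,0, returned reversed)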
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
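# For example, with n = 2 and an f that returns '1' only for rep == '11', the loop
# above skips the X conjugation for that single rep, so the oracle reduces to one
# plain Toffoli (mct) gate on the two controls and the target.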
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.x(input_qubit[3]) # number=1
prog.h(input_qubit[0]) # number=18
prog.x(input_qubit[1]) # number=28
prog.cz(input_qubit[3],input_qubit[0]) # number=19
prog.h(input_qubit[2]) # number=24
prog.h(input_qubit[0]) # number=20
prog.rx(-1.8378317023500288,input_qubit[1]) # number=25
prog.z(input_qubit[3]) # number=14
prog.cx(input_qubit[3],input_qubit[0]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[3]) # number=16
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[3]) # number=29
prog.cz(input_qubit[0],input_qubit[3]) # number=30
prog.h(input_qubit[3]) # number=31
prog.cx(input_qubit[0],input_qubit[3]) # number=36
prog.x(input_qubit[3]) # number=37
prog.cx(input_qubit[0],input_qubit[3]) # number=38
prog.cx(input_qubit[0],input_qubit[3]) # number=23
prog.cx(input_qubit[1],input_qubit[0]) # number=32
prog.z(input_qubit[1]) # number=33
prog.cx(input_qubit[1],input_qubit[0]) # number=34
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[2]) # number=12
prog.z(input_qubit[1]) # number=27
prog.h(input_qubit[1]) # number=35
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy3202.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_106_30236 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.iam_credentials_v1.types import common
from .base import IAMCredentialsTransport, DEFAULT_CLIENT_INFO
class IAMCredentialsGrpcTransport(IAMCredentialsTransport):
"""gRPC backend transport for IAMCredentials.
A service account is a special type of Google account that
belongs to your application or a virtual machine (VM), instead
of to an individual end user. Your application assumes the
identity of the service account to call Google APIs, so that the
users aren't directly involved.
Service account credentials are used to temporarily assume the
identity of the service account. Supported credential types
include OAuth 2.0 access tokens, OpenID Connect ID tokens, self-
signed JSON Web Tokens (JWTs), and more.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "iamcredentials.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "iamcredentials.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def generate_access_token(
self,
) -> Callable[
[common.GenerateAccessTokenRequest], common.GenerateAccessTokenResponse
]:
r"""Return a callable for the generate access token method over gRPC.
Generates an OAuth 2.0 access token for a service
account.
Returns:
Callable[[~.GenerateAccessTokenRequest],
~.GenerateAccessTokenResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_access_token" not in self._stubs:
self._stubs["generate_access_token"] = self.grpc_channel.unary_unary(
"/google.iam.credentials.v1.IAMCredentials/GenerateAccessToken",
request_serializer=common.GenerateAccessTokenRequest.serialize,
response_deserializer=common.GenerateAccessTokenResponse.deserialize,
)
return self._stubs["generate_access_token"]
@property
def generate_id_token(
self,
) -> Callable[[common.GenerateIdTokenRequest], common.GenerateIdTokenResponse]:
r"""Return a callable for the generate id token method over gRPC.
Generates an OpenID Connect ID token for a service
account.
Returns:
Callable[[~.GenerateIdTokenRequest],
~.GenerateIdTokenResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_id_token" not in self._stubs:
self._stubs["generate_id_token"] = self.grpc_channel.unary_unary(
"/google.iam.credentials.v1.IAMCredentials/GenerateIdToken",
request_serializer=common.GenerateIdTokenRequest.serialize,
response_deserializer=common.GenerateIdTokenResponse.deserialize,
)
return self._stubs["generate_id_token"]
@property
def sign_blob(self) -> Callable[[common.SignBlobRequest], common.SignBlobResponse]:
r"""Return a callable for the sign blob method over gRPC.
Signs a blob using a service account's system-managed
private key.
Returns:
Callable[[~.SignBlobRequest],
~.SignBlobResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "sign_blob" not in self._stubs:
self._stubs["sign_blob"] = self.grpc_channel.unary_unary(
"/google.iam.credentials.v1.IAMCredentials/SignBlob",
request_serializer=common.SignBlobRequest.serialize,
response_deserializer=common.SignBlobResponse.deserialize,
)
return self._stubs["sign_blob"]
@property
def sign_jwt(self) -> Callable[[common.SignJwtRequest], common.SignJwtResponse]:
r"""Return a callable for the sign jwt method over gRPC.
Signs a JWT using a service account's system-managed
private key.
Returns:
Callable[[~.SignJwtRequest],
~.SignJwtResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "sign_jwt" not in self._stubs:
self._stubs["sign_jwt"] = self.grpc_channel.unary_unary(
"/google.iam.credentials.v1.IAMCredentials/SignJwt",
request_serializer=common.SignJwtRequest.serialize,
response_deserializer=common.SignJwtResponse.deserialize,
)
return self._stubs["sign_jwt"]
__all__ = ("IAMCredentialsGrpcTransport",)
|
the-stack_106_30237 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# Enable ascii color control to stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# Enable ascii color control to stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_fee_estimation.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'p2p_timeouts.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_bip68_sequence.py',
'p2p_feefilter.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_createwallet.py',
'wallet_createwallet.py --usecli',
'interface_http.py',
'interface_rpc.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'p2p_blocksonly.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
'mempool_packages.py',
'rpc_createmultisig.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'p2p_leak_tx.py',
'rpc_signmessage.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_scriptaddress2.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'wallet_coinbase_category.py',
'feature_filelock.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
'feature_dbcrash.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [test + ".py" if ".py" not in test else test for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
# Remove <test_name>.py and <test_name>.py --arg from the test list
exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
for exclude_item in exclude_list:
test_list.remove(exclude_item)
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
runs_ci=args.ci,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "diamondhandscoind"]) is not None:
print("%sWARNING!%s There is already a diamondhandscoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if int(time.time() - start_time) > self.timeout_duration:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `diamondhandscoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, _, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
the-stack_106_30240 | # Copyright 2018-2022 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to copy this repository to blender code repository
from os import walk, makedirs, remove
import argparse
import sys
from os.path import dirname, realpath, isdir, splitext, join, isfile
from shutil import copyfile
ap = argparse.ArgumentParser()
ap.add_argument("-r", "--repo", required=False, help="repo path")
ap.add_argument("-b", "--bump", required=False, action="store_true", help="bump to +1 minor version number")
ap.add_argument("-w", "--rewrite", required=False, action="store_true", help="rewrite SPDX license identifiers")
args = vars(ap.parse_args())
root = dirname(realpath(__file__))
INPUT = root + "/../addons/"
# On glTF-Blender-IO repo, increase version number if needed
if args["bump"] is True:
if args["repo"] is None:
print("You can't bump to new version if --repo is not set")
sys.exit()
if args["repo"][-1] != "/":
args["repo"] += "/"
init_file = INPUT + "io_scene_gltf2/__init__.py"
if not isfile(init_file):
print("Can't find __init__ file")
sys.exit()
data = ""
new_line = ""
with open(init_file, "r") as f_read:
data = f_read.read()
for l in data.split("\n"):
if "\"version\"" in l:
try:
versions = l.split('(')[1].split(')')[0].split(',')
if len(versions) != 3:
print("Can't find version properly")
sys.exit()
new_line = " \"version\": (" + versions[0] + "," + versions[1] + ", " + str(int(versions[2]) + 1) + "),"
break
except:
print("Can't find version")
sys.exit()
with open(init_file, "w") as f_write:
for idx, l in enumerate(data.split("\n")):
if "\"version\"" in l:
f_write.write(new_line + "\n")
else:
if idx == len(data.split("\n")) - 1:
f_write.write(l)
else:
f_write.write(l + "\n")
# Copy it on blender repo
if args["repo"] is not None:
if args["repo"][-1] != "/":
args["repo"] += "/"
for root, dirs, files in walk(INPUT):
new_dir = args["repo"] + root[len(INPUT):]
if not isdir(new_dir):
makedirs(new_dir)
for file in files:
filename, fileext = splitext(file)
if fileext != ".py":
continue
if args["rewrite"] is False:
copyfile(root + "/" + file, new_dir + "/" + file)
else:
start_of_file = True
with open(root + "/" + file, "r") as fr:
with open(new_dir + "/" + file, 'w') as fw:
fw.write("# SPDX-License-Identifier: Apache-2.0\n")
for idx, l in enumerate(fr.readlines()):
if idx == 0:
fw.write(l)
else:
if start_of_file is True and l[0] == "#":
continue
elif start_of_file is True and l[0] != "#":
start_of_file = False
if start_of_file is False:
fw.write(l)
# Check that files removed are also removed in blender repo
for root, dirs, files in walk(join(args["repo"], "io_scene_gltf2")):
for file in files:
if not isfile(join(INPUT, join(root[len(args["repo"]):], file))):
print(join(root[len(args["repo"]):], file))
remove(join(args["repo"], join(root[len(args["repo"]):], file)))
|
the-stack_106_30243 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import resource_group_name_type, get_enum_type
from azure.cli.core.util import get_json_object
def load_arguments(self, _): # pylint: disable=too-many-statements
with self.argument_context('sf') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None, help='The resource group name')
        c.argument('cluster_name', options_list=['--name', '--cluster-name', '-n'], help='Specify the name of the cluster; if not given, it will be the same as the resource group name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('secret_identifier', help='The existing Azure key vault secret URL')
c.argument('certificate_file', help='The existing certificate file path for the primary cluster certificate.')
c.argument('parameter_file', help='The path to the template parameter file.')
c.argument('template_file', help='The path to the template file.')
c.argument('vm_password', help='The password of the Vm')
c.argument('certificate_output_folder', help='The folder of the new certificate file to be created.')
c.argument('certificate_password', help='The password of the certificate file.')
c.argument('certificate_subject_name', help='The subject name of the certificate to be created.')
        c.argument('vault_resource_group_name', options_list=['--vault-resource-group'], help='Key vault resource group name; if not given, it will be the cluster resource group name')
        c.argument('vault_name', help='Azure key vault name; if not given, it will be the cluster resource group name')
c.argument('cluster_size', options_list=['--cluster-size', '-s'], help='The number of nodes in the cluster. Default are 5 nodes')
c.argument('vm_sku', help='The Vm Sku')
c.argument('vm_user_name', help='The user name for logging to Vm. Default will be adminuser')
c.argument('vm_os', arg_type=get_enum_type(['WindowsServer2012R2Datacenter',
'WindowsServer2016Datacenter',
'WindowsServer2016DatacenterwithContainers',
'UbuntuServer1604']),
default='WindowsServer2016Datacenter', options_list=['--vm-os', '--os'],
help='The Operating System of the VMs that make up the cluster.')
c.argument('node_type', help='the Node type name.')
with self.argument_context('sf cluster list') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None, help='The resource group name')
with self.argument_context('sf client certificate') as c:
c.argument('certificate_common_name', help='client certificate common name.')
        c.argument('admin_client_thumbprints', nargs='+', help='Space-separated list of client certificate thumbprints that only have admin permission.')
c.argument('certificate_issuer_thumbprint', help='client certificate issuer thumbprint.')
with self.argument_context('sf cluster certificate') as c:
c.argument('thumbprint', help='The cluster certificate thumbprint to be removed')
with self.argument_context('sf cluster client-certificate') as c:
c.argument('is_admin', help='Client authentication type.')
c.argument('certificate_issuer_thumbprint', help='client certificate issuer thumbprint.')
c.argument('certificate_common_name', help='client certificate common name.')
c.argument('admin_client_thumbprints', nargs='+', help='client certificate thumbprint that only has admin permission.')
c.argument('readonly_client_thumbprints', nargs='+', help='Space-separated list of client certificate thumbprint that has read only permission.')
with self.argument_context('sf cluster client-certificate add') as c:
c.argument('thumbprint', help='client certificate thumbprint.')
with self.argument_context('sf cluster client-certificate remove') as c:
c.argument('thumbprints', nargs='+', help='A single or Space-separated list of client certificate thumbprint(s) to be remove.')
with self.argument_context('sf cluster node') as c:
c.argument('number_of_nodes_to_add', help='number of nodes to add.')
c.argument('number_of_nodes_to_remove', help='number of nodes to remove.')
with self.argument_context('sf cluster durability') as c:
c.argument('durability_level', arg_type=get_enum_type(['Bronze', 'Silver', 'Gold']), help='durability level.')
with self.argument_context('sf cluster setting') as c:
c.argument('parameter', help='parameter name')
c.argument('section', help='section name')
c.argument('value', help='Specify the value')
c.argument('settings_section_description', help='Specify the value')
with self.argument_context('sf cluster upgrade-type set') as c:
c.argument('version', help='cluster code version')
c.argument('upgrade_mode', arg_type=get_enum_type(['manual', 'automatic']), help='cluster upgrade mode')
with self.argument_context('sf cluster reliability') as c:
c.argument('reliability_level', arg_type=get_enum_type(['Bronze', 'Silver', 'Gold']), help='durability level.')
c.argument('auto_add_node', help='Add node count automatically when changing reliability.')
with self.argument_context('sf cluster setting set') as c:
c.argument('settings_section_description', type=get_json_object,
help='JSON encoded parameters configuration. Use @{file} to load from a file. '
'For example: [{"section": "NamingService","parameter": "MaxOperationTimeout","value": 1000},{"section": "MaxFileOperationTimeout","parameter": "Max2","value": 1000]')
with self.argument_context('sf cluster setting remove') as c:
c.argument('settings_section_description', type=get_json_object,
help='JSON encoded parameters configuration. Use @{file} to load from a file. '
'For example: [{"section": "NamingService","parameter": "MaxOperationTimeout"}]')
with self.argument_context('sf cluster client-certificate remove') as c:
c.argument('client_certificate_common_names', type=get_json_object,
help='JSON encoded parameters configuration. Use @{file} to load from a file. '
'For example: [{"certificateCommonName": "test.com","certificateIssuerThumbprint": "22B4AE296B504E512DF880A77A2CAE20200FF922"}]')
with self.argument_context('sf cluster client-certificate add') as c:
c.argument('client_certificate_common_names', type=get_json_object,
help='JSON encoded parameters configuration. Use @{file} to load from a file. '
'For example: [{"isAdmin":true, "certificateCommonName": "test.com", '
'"certificateIssuerThumbprint": "22B4AE296B504E512DF880A77A2CAE20200FF922"}]')
|
the-stack_106_30245 | # Copyright (c) 2004-2015 Odoo S.A.
# Copyright 2018 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
# pylint: disable=sql-injection
from random import choice
from string import digits
from odoo import SUPERUSER_ID, _, api, exceptions, fields, models
class HrPartner(models.Model):
_inherit = "res.partner"
_description = "Partner"
def _default_random_pin(self):
return "".join(choice(digits) for i in range(4))
# def _default_random_barcode(self):
# barcode = None
# while not barcode or self.env['res.partner'].search([('barcode', '=', barcode)]):
# barcode = "".join(choice(digits) for i in range(8))
# return barcode
# barcode = fields.Char(string="Badge ID", help="ID used for partner identification.",
# default=_default_random_barcode, copy=False)
pin = fields.Char(
string="Attendance PIN",
default=_default_random_pin,
help="PIN used to Check In/Out in Kiosk Mode (if enabled in Configuration).",
copy=False,
)
partners_attendance_ids = fields.One2many(
"res.partner.attendance",
"partner_id",
help="list of attendances for the employee",
)
last_attendance_id = fields.Many2one(
"res.partner.attendance", compute="_compute_last_attendance_id"
)
attendance_state = fields.Selection(
string="Attendance",
compute="_compute_attendance_state",
selection=[("checked_out", "Checked out"), ("checked_in", "Checked in")],
)
# _sql_constraints = [('barcode_uniq', 'unique (barcode)',
# "The Badge ID must be unique, this one is already assigned to another employee.")]
@api.depends("partners_attendance_ids")
def _compute_last_attendance_id(self):
for partner in self:
partner.last_attendance_id = (
partner.partners_attendance_ids
and partner.partners_attendance_ids[0]
or False
)
@api.depends(
"last_attendance_id.check_in",
"last_attendance_id.check_out",
"last_attendance_id",
)
def _compute_attendance_state(self):
for partner in self:
partner.attendance_state = (
partner.last_attendance_id
and not partner.last_attendance_id.check_out
and "checked_in"
or "checked_out"
)
@api.constrains("pin")
def _verify_pin(self):
for partner in self:
if partner.pin and not partner.pin.isdigit():
raise exceptions.ValidationError(
_("The PIN must be a sequence of digits.")
)
@api.model
def attendance_scan(self, barcode):
""" Receive a barcode scanned from the Kiosk Mode and change the attendances of corresponding partner.
Returns either an action or a warning.
"""
partner = self.search([("barcode", "=", barcode)], limit=1)
return (
partner
and partner.attendance_action(
"base_attendance.hr_attendance_action_kiosk_mode"
)
or {
"warning": _("No partner corresponding to barcode %(barcode)s")
% {"barcode": barcode}
}
)
def attendance_manual(self, next_action, entered_pin=None):
self.ensure_one()
if not (entered_pin is None) or self.env["res.users"].browse(
SUPERUSER_ID
).has_group("base_attendance.group_hr_attendance_use_pin"):
if entered_pin != self.pin:
return {"warning": _("Wrong PIN")}
return self.attendance_action(next_action)
def attendance_action(self, next_action):
""" Changes the attendance of the partner.
Returns an action to the check in/out message,
next_action defines which menu the check in/out message should return to. ("My Attendances" or "Kiosk Mode")
"""
self.ensure_one()
action_message = self.env.ref(
"base_attendance.hr_attendance_action_greeting_message"
).read()[0]
action_message["previous_attendance_change_date"] = (
self.last_attendance_id
and (self.last_attendance_id.check_out or self.last_attendance_id.check_in)
or False
)
action_message["partner_name"] = self.name
action_message["next_action"] = next_action
modified_attendance = self.sudo().attendance_action_change()
action_message["attendance"] = modified_attendance.read()[0]
return {"action": action_message}
def attendance_action_change(self):
""" Check In/Check Out action
Check In: create a new attendance record
Check Out: modify check_out field of appropriate attendance record
"""
if len(self) > 1:
raise exceptions.UserError(
_("Cannot perform check in or check out on multiple partners.")
)
action_date = fields.Datetime.now()
if self.attendance_state != "checked_in":
vals = {"partner_id": self.id, "check_in": action_date}
return self.env["res.partner.attendance"].create(vals)
else:
attendance = self.env["res.partner.attendance"].search(
[("partner_id", "=", self.id), ("check_out", "=", False)], limit=1
)
if attendance:
attendance.check_out = action_date
else:
raise exceptions.UserError(
_(
"Cannot perform check out on %(empl_name)s, could not find corresponding check in. "
"Your attendances have probably been modified manually by human resources."
)
% {"empl_name": self.name}
)
return attendance
def _init_column(self, column_name):
""" Initialize the value of the given column for existing rows.
Overridden here because we need to have different default values
for barcode and pin for every partner.
"""
# if column_name not in ["barcode", "pin"]:
if column_name not in ["pin"]:
super(HrPartner, self)._init_column(column_name)
else:
default_compute = self._fields[column_name].default
query = 'SELECT id FROM "{}" WHERE "{}" is NULL'.format(
self._table, column_name
)
self.env.cr.execute(query)
partner_ids = self.env.cr.fetchall()
for partner_id in partner_ids:
default_value = default_compute(self)
query = 'UPDATE "{}" SET "{}"=%s WHERE id = {}'.format(
self._table, column_name, partner_id[0]
)
self.env.cr.execute(query, (default_value,))
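# Minimal server-side sketch (assumes an Odoo environment `env` and a known
# partner id); it mirrors what the kiosk barcode flow above does internally:
#   partner = env["res.partner"].browse(partner_id)
#   result = partner.attendance_action("base_attendance.hr_attendance_action_kiosk_mode")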
|
the-stack_106_30246 | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.core.exceptions import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with as_sql() and either
relabeled_clone() method or relabel_aliases() and clone() methods and
contains_aggregate attribute.
"""
default = AND
def split_having(self, negated=False):
"""
Returns two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
if not self.contains_aggregate:
return self, None
in_negated = negated ^ self.negated
# If the effective connector is OR and this node contains an aggregate,
# then we need to push the whole branch to HAVING clause.
may_need_split = (
(in_negated and self.connector == AND) or
(not in_negated and self.connector == OR))
if may_need_split and self.contains_aggregate:
return None, self
where_parts = []
having_parts = []
for c in self.children:
if hasattr(c, 'split_having'):
where_part, having_part = c.split_having(in_negated)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
return where_node, having_node
def as_sql(self, compiler, connection):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns '', [] if this node matches everything,
None, [] if this node is empty, and raises EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check if this node matches nothing or everything.
# First check the amount of full nodes and empty nodes
# to make this node empty/full.
# Now, check if this node is full/empty using the
# counts.
if empty_needed == 0:
if self.negated:
return '', []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return '', []
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = 'NOT (%s)' % sql_string
elif len(result) > 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def relabel_aliases(self, change_map):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, 'relabel_aliases'):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, 'relabeled_clone'):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
"""
Creates a clone of the tree. Must only be called on root nodes (nodes
        with empty subtree_parents). Children must be either (Constraint, lookup,
value) tuples, or objects supporting .clone().
"""
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
class NothingNode(object):
"""
A node that matches nothing.
"""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere(object):
# The contents are a black box - assume no aggregates are used.
contains_aggregate = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint(object):
# Even if aggregates would be used in a subquery, the outer query isn't
# interested about those.
contains_aggregate = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
# QuerySet was sent
if hasattr(query, 'values'):
if query._db and connection.alias != query._db:
raise ValueError("Can't do subqueries with queries on different DBs.")
# Do not override already existing values.
if query._fields is None:
query = query.values(*self.targets)
else:
query = query._clone()
query = query.query
if query.can_filter():
# If there is no slicing in use, then we can safely drop all ordering
query.clear_ordering(True)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
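# Rough illustration (assumes a configured Django project and a hypothetical
# Author model): WhereNode trees are normally built indirectly by the ORM, e.g.
#   qs = Author.objects.filter(name__startswith="A") | Author.objects.filter(num_books__gt=3)
#   qs.query.where  # an OR-connected WhereNode combining the two lookups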
|
the-stack_106_30247 | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='python-amazon-paapi',
version='3.3.1',
author='Sergio Abad',
author_email='[email protected]',
description='Amazon Product Advertising API 5.0 wrapper for Python',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/sergioteula/python-amazon-paapi',
packages=setuptools.find_packages(),
install_requires=['certifi',
'six',
'python_dateutil',
'setuptools',
'urllib3'],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=2.7',
)
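# Typical install step for this package (illustrative):
#   pip install python-amazon-paapi
# The import path exposed after installation is documented in the project
# README linked in the `url` field above.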
|
the-stack_106_30248 | from keras.models import Sequential
from keras.layers import *
from skimage.measure import compare_psnr
import numpy as np
import keras.backend as K
from keras.callbacks import *
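# PSNR (peak signal-to-noise ratio) for 8-bit images: 20 * log10(255 / sqrt(MSE)),
# where MSE is the mean squared error between the reference and altered images.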
def psnr(base_image,altered_image):
try:
MSE=K.mean(K.square(base_image-altered_image))
if(MSE==0):
return 100
else:
            # PSNR uses a base-10 logarithm; K.log is natural log, hence the division.
            return 20 * K.log(255.0 / K.sqrt(MSE)) / np.log(10.0)
except Exception as e:
print(e)
return K.constant(100)
def advance_relu(input_):
return K.relu( input_,alpha=0.0, max_value=255.0)
import pickle
bth=50
epc=1
model=Sequential()
mc=ModelCheckpoint("weights.{epoch:02d}-loss{loss:.2f}-acc{acc:.2f}-.hdf5", monitor='loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
es=EarlyStopping(monitor='loss', min_delta=1, patience=3, verbose=1, mode='auto')
tb=TensorBoard(log_dir='./logs', batch_size=bth, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
lr=ReduceLROnPlateau(monitor='loss', factor=0.1, patience=1, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
model.add(Conv2D(64,(3,3),input_shape=(256,256,3),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
#conv2_1
model.add(Conv2D(128,(2,2),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
#conv2_2
model.add(MaxPooling2D(pool_size=(2,2)))
#conv3_1
model.add(Conv2D(256,(2,2),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
#conv3_2
model.add(MaxPooling2D(pool_size=(2,2)))
#CONV4_1
model.add(Conv2D(512,(2,2),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
########################################
## DECONV STARTS
########################################
#DECONV 1 -1
model.add(Conv2DTranspose(512,(2,2),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(UpSampling2D(size=(2,2)))
# DECONV 2-1
model.add(Conv2DTranspose(256,(2,2),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(UpSampling2D(size=(2,2)))
#DECONV 3-1
model.add(Conv2DTranspose(128,(2,2),strides=(1,1),padding="valid",
activation="linear",
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(UpSampling2D(size=(2,2)))
# DECONV 4-1
model.add(Conv2DTranspose(3,(3,3),strides=(1,1),padding="valid",
activation=advance_relu,
kernel_initializer="truncated_normal",
bias_initializer="truncated_normal"))
model.summary()
k=model.get_config()
print(k)
print("values")
print(k.values)
model.compile(
loss="mse",
optimizer="adam",
metrics=['accuracy',psnr]
)
with open("./data.pkl",'rb') as f:
data=pickle.load(f)
model.fit(data['norain'].reshape(-1,256,256,3)[:100,:],data['rain'].reshape(-1,256,256,3)[:100,:],epochs=epc,batch_size=bth,shuffle=True,validation_split=0.3,callbacks=[mc,es,tb,lr])
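# Optional follow-up (sketch): persist the trained network; reloading it requires
# passing the custom metric/activation through `custom_objects`.
#   model.save('derain_model.h5')
#   from keras.models import load_model
#   restored = load_model('derain_model.h5', custom_objects={'psnr': psnr, 'advance_relu': advance_relu})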
|
the-stack_106_30250 | # The MIT License
#
# Copyright (c) 2008
# Shibzoukhov Zaur Moukhadinovich
# [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
NoneType = type(None)
null = object()  # sentinel used by visit_object to detect missing __slots__ attributes
from types import BuiltinFunctionType, BuiltinMethodType, FunctionType, MethodType
from sys import version_info
py_version = version_info[0]*10 + version_info[1]
if py_version >= 30:
simpleTypes = (NoneType, int, str, bool, float, bytes)
else:
simpleTypes = (NoneType, int, long, str, bool, float, bytes)
constants = ((), frozenset())
class Cacher(object):
#
def __init__(self):
self.objects_cache = {}
self.objects_info = {}
method_cache = {}
def cache_method(name):
def func(m, name=name):
method_cache[name] = m
return m
return func
def visit(self, o):
if type(o) in simpleTypes:
return
if o in constants:
return
oId = id(o)
if oId in self.objects_cache:
info = self.objects_info[oId]
if info == 0:
self.objects_info[oId] = 1
else:
self.objects_cache[oId] = o
self.objects_info[oId] = 0
method = method_cache.get(o.__class__.__name__, visit_object)
method(self, o)
#
@cache_method('list')
def visit_list(self, o):
for item in o:
visit(self, item)
#
@cache_method('set')
def visit_set(self, o):
for item in o:
visit(self, item)
#
    @cache_method('frozenset')
def visit_frozenset(self, o):
for item in o:
visit(self, item)
#
@cache_method('tuple')
def visit_tuple(self, o):
for item in o:
visit(self, item)
#
@cache_method('object')
def visit_object(self, o):
return
#
@cache_method('type')
def visit_type(self, o):
metatype = o.__class__
if metatype == type:
return
else:
return
#
@cache_method('dict')
def visit_dict(self, o):
for key,item in o.items():
visit(self, key)
visit(self, item)
@cache_method('property')
def visit_property(self, o):
for f in (o.fget, o.fset, o.fdel, o.__doc__):
if f is not None:
visit(self, f)
@cache_method('function')
def visit_function(self, o):
return
#
@cache_method('method')
def visit_method(self, o):
return visit(self, o.__self__)
#
@cache_method('builtin_function_or_method')
def visit_builtin_function_or_method(self, o):
return
@cache_method('object')
def visit_object(self, o):
if isinstance(o, type):
return visit_type(self, o)
reduce = getattr(o, '__reduce__', None)
if reduce:
state = reduce()
return with_reduce(self, state)
else:
newname = o.__class__.__name__
newargs = None
getnewargs = getattr(o, '__getnewargs__', None)
if getnewargs:
newargs = getnewargs()
state = None
getstate = getattr(o, '__getstate__', None)
if getstate:
state = getstate()
else:
state = getattr(o, '__dict__', None)
if state is None:
state = {}
for name in o.__slots__:
value = getattr(o, name, null)
if value is not null:
state[name] = value
return without_reduce(self, newargs, state)
#
def with_reduce(self, state):
visit(self, state[0])
n = len(state)
if n > 1:
if state[1]:
for item in state[1]:
visit(self, item)
if n > 2:
if state[2]:
for k, v in state[2].items():
visit(self, v)
if n > 3:
if state[3]:
for v in state[3]:
visit(self, v)
if n > 4:
if state[4]:
for k, v in state[4].items():
visit(self, k)
visit(self, v)
#
def without_reduce(self, args, state):
if args:
for item in args:
visit(self, item)
if state:
visit(self, state)
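# Intended usage sketch (illustrative only): walk an object graph and record
# which objects were reached more than once.
#   cacher = Cacher()
#   cacher.visit({"a": [1, 2, 3], "b": "x"})
#   repeated = [oid for oid, count in cacher.objects_info.items() if count]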
|
the-stack_106_30251 | import torch
import numpy as np
import pytorch_lightning as pl
from typing import Any, Dict, Tuple
from prohmr.models import SMPL
from yacs.config import CfgNode
from prohmr.utils import SkeletonRenderer
from prohmr.utils.geometry import aa_to_rotmat, perspective_projection
from prohmr.optimization import OptimizationTask
from .backbones import create_backbone
from .heads import SMPLFlow
from .discriminator import Discriminator
from .losses import Keypoint3DLoss, Keypoint2DLoss, ParameterLoss
class ProHMR(pl.LightningModule):
def __init__(self, cfg: CfgNode):
"""
Setup ProHMR model
Args:
cfg (CfgNode): Config file as a yacs CfgNode
"""
super().__init__()
self.cfg = cfg
# Create backbone feature extractor
self.backbone = create_backbone(cfg)
# Create Normalizing Flow head
self.flow = SMPLFlow(cfg)
# Create discriminator
self.discriminator = Discriminator()
# Define loss functions
self.keypoint_3d_loss = Keypoint3DLoss(loss_type='l1')
self.keypoint_2d_loss = Keypoint2DLoss(loss_type='l1')
self.smpl_parameter_loss = ParameterLoss()
# Instantiate SMPL model
smpl_cfg = {k.lower(): v for k,v in dict(cfg.SMPL).items()}
self.smpl = SMPL(**smpl_cfg)
        # Buffer that shows whether we need to initialize ActNorm layers
self.register_buffer('initialized', torch.tensor(False))
# Setup renderer for visualization
self.renderer = SkeletonRenderer(self.cfg)
# Disable automatic optimization since we use adversarial training
self.automatic_optimization = False
def configure_optimizers(self) -> Tuple[torch.optim.Optimizer, torch.optim.Optimizer]:
"""
Setup model and distriminator Optimizers
Returns:
Tuple[torch.optim.Optimizer, torch.optim.Optimizer]: Model and discriminator optimizers
"""
optimizer = torch.optim.AdamW(params=list(self.backbone.parameters()) + list(self.flow.parameters()),
lr=self.cfg.TRAIN.LR,
weight_decay=self.cfg.TRAIN.WEIGHT_DECAY)
optimizer_disc = torch.optim.AdamW(params=self.discriminator.parameters(),
lr=self.cfg.TRAIN.LR,
weight_decay=self.cfg.TRAIN.WEIGHT_DECAY)
return optimizer, optimizer_disc
def initialize(self, batch: Dict, conditioning_feats: torch.Tensor):
"""
Initialize ActNorm buffers by running a dummy forward step
Args:
batch (Dict): Dictionary containing batch data
            conditioning_feats (torch.Tensor): Tensor of shape (N, C) containing the conditioning features extracted using the backbone
"""
# Get ground truth SMPL params, convert them to 6D and pass them to the flow module together with the conditioning feats.
# Necessary to initialize ActNorm layers.
smpl_params = {k: v.clone() for k,v in batch['smpl_params'].items()}
batch_size = smpl_params['body_pose'].shape[0]
has_smpl_params = batch['has_smpl_params']['body_pose'] > 0
smpl_params['body_pose'] = aa_to_rotmat(smpl_params['body_pose'].reshape(-1, 3)).reshape(batch_size, -1, 3, 3)[:, :, :, :2].permute(0, 1, 3, 2).reshape(batch_size, 1, -1)[has_smpl_params]
smpl_params['global_orient'] = aa_to_rotmat(smpl_params['global_orient'].reshape(-1, 3)).reshape(batch_size, -1, 3, 3)[:, :, :, :2].permute(0, 1, 3, 2).reshape(batch_size, 1, -1)[has_smpl_params]
smpl_params['betas'] = smpl_params['betas'].unsqueeze(1)[has_smpl_params]
conditioning_feats = conditioning_feats[has_smpl_params]
with torch.no_grad():
_, _ = self.flow.log_prob(smpl_params, conditioning_feats)
self.initialized |= True
def forward_step(self, batch: Dict, train: bool = False) -> Dict:
"""
Run a forward step of the network
Args:
batch (Dict): Dictionary containing batch data
train (bool): Flag indicating whether it is training or validation mode
Returns:
Dict: Dictionary containing the regression output
"""
if train:
num_samples = self.cfg.TRAIN.NUM_TRAIN_SAMPLES
else:
num_samples = self.cfg.TRAIN.NUM_TEST_SAMPLES
# Use RGB image as input
x = batch['img']
batch_size = x.shape[0]
# Compute keypoint features using the backbone
conditioning_feats = self.backbone(x)
# If ActNorm layers are not initialized, initialize them
if not self.initialized.item():
self.initialize(batch, conditioning_feats)
# If more than one sample is requested, draw num_samples - 1 random samples from the flow and prepend the mode (z = 0)
if num_samples > 1:
pred_smpl_params, pred_cam, log_prob, _, pred_pose_6d = self.flow(conditioning_feats, num_samples=num_samples-1)
z_0 = torch.zeros(batch_size, 1, self.cfg.MODEL.FLOW.DIM, device=x.device)
pred_smpl_params_mode, pred_cam_mode, log_prob_mode, _, pred_pose_6d_mode = self.flow(conditioning_feats, z=z_0)
pred_smpl_params = {k: torch.cat((pred_smpl_params_mode[k], v), dim=1) for k,v in pred_smpl_params.items()}
pred_cam = torch.cat((pred_cam_mode, pred_cam), dim=1)
log_prob = torch.cat((log_prob_mode, log_prob), dim=1)
pred_pose_6d = torch.cat((pred_pose_6d_mode, pred_pose_6d), dim=1)
else:
z_0 = torch.zeros(batch_size, 1, self.cfg.MODEL.FLOW.DIM, device=x.device)
pred_smpl_params, pred_cam, log_prob, _, pred_pose_6d = self.flow(conditioning_feats, z=z_0)
# Store useful regression outputs to the output dict
output = {}
output['pred_cam'] = pred_cam
output['pred_smpl_params'] = {k: v.clone() for k,v in pred_smpl_params.items()}
output['log_prob'] = log_prob.detach()
output['conditioning_feats'] = conditioning_feats
output['pred_pose_6d'] = pred_pose_6d
# Compute camera translation
device = pred_smpl_params['body_pose'].device
dtype = pred_smpl_params['body_pose'].dtype
focal_length = self.cfg.EXTRA.FOCAL_LENGTH * torch.ones(batch_size, num_samples, 2, device=device, dtype=dtype)
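# Convert the predicted weak-perspective camera (s, tx, ty) to a full translation
# t = (tx, ty, 2 * f / (IMAGE_SIZE * s)); the 1e-9 term guards against division by zero.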
pred_cam_t = torch.stack([pred_cam[:, :, 1],
pred_cam[:, :, 2],
2*focal_length[:, :, 0]/(self.cfg.MODEL.IMAGE_SIZE * pred_cam[:, :, 0] +1e-9)],dim=-1)
output['pred_cam_t'] = pred_cam_t
# Compute model vertices, joints and the projected joints
pred_smpl_params['global_orient'] = pred_smpl_params['global_orient'].reshape(batch_size * num_samples, -1, 3, 3)
pred_smpl_params['body_pose'] = pred_smpl_params['body_pose'].reshape(batch_size * num_samples, -1, 3, 3)
pred_smpl_params['betas'] = pred_smpl_params['betas'].reshape(batch_size * num_samples, -1)
smpl_output = self.smpl(**{k: v.float() for k,v in pred_smpl_params.items()}, pose2rot=False)
output['smpl_output'] = smpl_output
pred_keypoints_3d = smpl_output.joints
pred_vertices = smpl_output.vertices
output['pred_keypoints_3d'] = pred_keypoints_3d.reshape(batch_size, num_samples, -1, 3)
output['pred_vertices'] = pred_vertices.reshape(batch_size, num_samples, -1, 3)
pred_cam_t = pred_cam_t.reshape(-1, 3)
focal_length = focal_length.reshape(-1, 2)
pred_keypoints_2d = perspective_projection(pred_keypoints_3d,
translation=pred_cam_t,
focal_length=focal_length / self.cfg.MODEL.IMAGE_SIZE)
output['pred_keypoints_2d'] = pred_keypoints_2d.reshape(batch_size, num_samples, -1, 2)
return output
def compute_loss(self, batch: Dict, output: Dict, train: bool = True) -> torch.Tensor:
"""
Compute losses given the input batch and the regression output
Args:
batch (Dict): Dictionary containing batch data
output (Dict): Dictionary containing the regression output
train (bool): Flag indicating whether it is training or validation mode
Returns:
torch.Tensor : Total loss for current batch
"""
pred_smpl_params = output['pred_smpl_params']
pred_pose_6d = output['pred_pose_6d']
conditioning_feats = output['conditioning_feats']
pred_keypoints_2d = output['pred_keypoints_2d']
pred_keypoints_3d = output['pred_keypoints_3d']
batch_size = pred_smpl_params['body_pose'].shape[0]
num_samples = pred_smpl_params['body_pose'].shape[1]
device = pred_smpl_params['body_pose'].device
dtype = pred_smpl_params['body_pose'].dtype
# Get annotations
gt_keypoints_2d = batch['keypoints_2d']
gt_keypoints_3d = batch['keypoints_3d']
gt_smpl_params = batch['smpl_params']
has_smpl_params = batch['has_smpl_params']
is_axis_angle = batch['smpl_params_is_axis_angle']
# Compute 2D and 3D keypoint losses
loss_keypoints_2d = self.keypoint_2d_loss(pred_keypoints_2d, gt_keypoints_2d.unsqueeze(1).repeat(1, num_samples, 1, 1))
loss_keypoints_3d = self.keypoint_3d_loss(pred_keypoints_3d, gt_keypoints_3d.unsqueeze(1).repeat(1, num_samples, 1, 1), pelvis_id=25+14)
# Compute loss on SMPL parameters
loss_smpl_params = {}
for k, pred in pred_smpl_params.items():
gt = gt_smpl_params[k].unsqueeze(1).repeat(1, num_samples, 1).view(batch_size * num_samples, -1)
if is_axis_angle[k].all():
gt = aa_to_rotmat(gt.reshape(-1, 3)).view(batch_size * num_samples, -1, 3, 3)
has_gt = has_smpl_params[k].unsqueeze(1).repeat(1, num_samples)
loss_smpl_params[k] = self.smpl_parameter_loss(pred.reshape(batch_size, num_samples, -1), gt.reshape(batch_size, num_samples, -1), has_gt)
# Compute mode and expectation losses for 3D and 2D keypoints
# The first item of the second dimension always corresponds to the mode
loss_keypoints_2d_mode = loss_keypoints_2d[:, [0]].sum() / batch_size
if loss_keypoints_2d.shape[1] > 1:
loss_keypoints_2d_exp = loss_keypoints_2d[:, 1:].sum() / (batch_size * (num_samples - 1))
else:
loss_keypoints_2d_exp = torch.tensor(0., device=device, dtype=dtype)
loss_keypoints_3d_mode = loss_keypoints_3d[:, [0]].sum() / batch_size
if loss_keypoints_3d.shape[1] > 1:
loss_keypoints_3d_exp = loss_keypoints_3d[:, 1:].sum() / (batch_size * (num_samples - 1))
else:
loss_keypoints_3d_exp = torch.tensor(0., device=device, dtype=dtype)
loss_smpl_params_mode = {k: v[:, [0]].sum() / batch_size for k,v in loss_smpl_params.items()}
if loss_smpl_params['body_pose'].shape[1] > 1:
loss_smpl_params_exp = {k: v[:, 1:].sum() / (batch_size * (num_samples - 1)) for k,v in loss_smpl_params.items()}
else:
loss_smpl_params_exp = {k: torch.tensor(0., device=device, dtype=dtype) for k,v in loss_smpl_params.items()}
# Filter out images with corresponding SMPL parameter annotations
smpl_params = {k: v.clone() for k,v in gt_smpl_params.items()}
smpl_params['body_pose'] = aa_to_rotmat(smpl_params['body_pose'].reshape(-1, 3)).reshape(batch_size, -1, 3, 3)[:, :, :, :2].permute(0, 1, 3, 2).reshape(batch_size, 1, -1)
smpl_params['global_orient'] = aa_to_rotmat(smpl_params['global_orient'].reshape(-1, 3)).reshape(batch_size, -1, 3, 3)[:, :, :, :2].permute(0, 1, 3, 2).reshape(batch_size, 1, -1)
smpl_params['betas'] = smpl_params['betas'].unsqueeze(1)
has_smpl_params = (batch['has_smpl_params']['body_pose'] > 0)
smpl_params = {k: v[has_smpl_params] for k, v in smpl_params.items()}
# Compute NLL loss
# Add some noise to annotations at training time to prevent overfitting
if train:
smpl_params = {k: v + self.cfg.TRAIN.SMPL_PARAM_NOISE_RATIO * torch.randn_like(v) for k, v in smpl_params.items()}
if smpl_params['body_pose'].shape[0] > 0:
log_prob, _ = self.flow.log_prob(smpl_params, conditioning_feats[has_smpl_params])
else:
log_prob = torch.zeros(1, device=device, dtype=dtype)
loss_nll = -log_prob.mean()
# Compute orthonormal loss on 6D representations
pred_pose_6d = pred_pose_6d.reshape(-1, 2, 3).permute(0, 2, 1)
loss_pose_6d = ((torch.matmul(pred_pose_6d.permute(0, 2, 1), pred_pose_6d) - torch.eye(2, device=pred_pose_6d.device, dtype=pred_pose_6d.dtype).unsqueeze(0)) ** 2)
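# Each 6D prediction is treated as a 3x2 matrix B; the penalty above is ||B^T B - I_2||^2,
# which is zero exactly when the two 3D column vectors are orthonormal.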
loss_pose_6d = loss_pose_6d.reshape(batch_size, num_samples, -1)
loss_pose_6d_mode = loss_pose_6d[:, 0].mean()
loss_pose_6d_exp = loss_pose_6d[:, 1:].mean()
loss = self.cfg.LOSS_WEIGHTS['KEYPOINTS_3D_EXP'] * loss_keypoints_3d_exp+\
self.cfg.LOSS_WEIGHTS['KEYPOINTS_2D_EXP'] * loss_keypoints_2d_exp+\
self.cfg.LOSS_WEIGHTS['NLL'] * loss_nll+\
self.cfg.LOSS_WEIGHTS['ORTHOGONAL'] * (loss_pose_6d_exp+loss_pose_6d_mode)+\
sum([loss_smpl_params_exp[k] * self.cfg.LOSS_WEIGHTS[(k+'_EXP').upper()] for k in loss_smpl_params_exp])+\
self.cfg.LOSS_WEIGHTS['KEYPOINTS_3D_MODE'] * loss_keypoints_3d_mode+\
self.cfg.LOSS_WEIGHTS['KEYPOINTS_2D_MODE'] * loss_keypoints_2d_mode+\
sum([loss_smpl_params_mode[k] * self.cfg.LOSS_WEIGHTS[(k+'_MODE').upper()] for k in loss_smpl_params_mode])
losses = dict(loss=loss.detach(),
loss_nll=loss_nll.detach(),
loss_pose_6d_exp=loss_pose_6d_exp,
loss_pose_6d_mode=loss_pose_6d_mode,
loss_keypoints_2d_exp=loss_keypoints_2d_exp.detach(),
loss_keypoints_3d_exp=loss_keypoints_3d_exp.detach(),
loss_keypoints_2d_mode=loss_keypoints_2d_mode.detach(),
loss_keypoints_3d_mode=loss_keypoints_3d_mode.detach())
for k, v in loss_smpl_params_exp.items():
losses['loss_' + k + '_exp'] = v.detach()
for k, v in loss_smpl_params_mode.items():
losses['loss_' + k + '_mode'] = v.detach()
output['losses'] = losses
return loss
def tensorboard_logging(self, batch: Dict, output: Dict, step_count: int, train: bool = True) -> None:
"""
Log results to Tensorboard
Args:
batch (Dict): Dictionary containing batch data
output (Dict): Dictionary containing the regression output
step_count (int): Global training step count
train (bool): Flag indicating whether it is training or validation mode
"""
mode = 'train' if train else 'val'
summary_writer = self.logger.experiment
batch_size = batch['keypoints_2d'].shape[0]
images = batch['img']
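# Undo the ImageNet normalization (multiply by the std, add the mean) and rescale to
# [0, 255] in HWC layout for visualization.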
images = images * torch.tensor([0.229, 0.224, 0.225], device=images.device).reshape(1,3,1,1)
images = images + torch.tensor([0.485, 0.456, 0.406], device=images.device).reshape(1,3,1,1)
images = 255*images.permute(0, 2, 3, 1).cpu().numpy()
num_samples = self.cfg.TRAIN.NUM_TRAIN_SAMPLES if mode == 'train' else self.cfg.TRAIN.NUM_TEST_SAMPLES
pred_keypoints_3d = output['pred_keypoints_3d'].detach().reshape(batch_size, num_samples, -1, 3)
gt_keypoints_3d = batch['keypoints_3d']
gt_keypoints_2d = batch['keypoints_2d']
losses = output['losses']
pred_cam_t = output['pred_cam_t'].detach().reshape(batch_size, num_samples, 3)
pred_keypoints_2d = output['pred_keypoints_2d'].detach().reshape(batch_size, num_samples, -1, 2)
for loss_name, val in losses.items():
summary_writer.add_scalar(mode +'/' + loss_name, val.detach().item(), step_count)
num_images = min(batch_size, self.cfg.EXTRA.NUM_LOG_IMAGES)
num_samples_per_image = min(num_samples, self.cfg.EXTRA.NUM_LOG_SAMPLES_PER_IMAGE)
gt_keypoints_3d = batch['keypoints_3d']
pred_keypoints_3d = output['pred_keypoints_3d'].detach().reshape(batch_size, num_samples, -1, 3)
# We render the skeletons instead of the full mesh because rendering a lot of meshes will make the training slow.
predictions = self.renderer(pred_keypoints_3d[:num_images, :num_samples_per_image],
gt_keypoints_3d[:num_images],
2 * gt_keypoints_2d[:num_images],
images=images[:num_images],
camera_translation=pred_cam_t[:num_images, :num_samples_per_image])
summary_writer.add_image('%s/predictions' % mode, predictions.transpose((2, 0, 1)), step_count)
def forward(self, batch: Dict) -> Dict:
"""
Run a forward step of the network in val mode
Args:
batch (Dict): Dictionary containing batch data
Returns:
Dict: Dictionary containing the regression output
"""
return self.forward_step(batch, train=False)
def training_step_discriminator(self, batch: Dict,
body_pose: torch.Tensor,
betas: torch.Tensor,
optimizer: torch.optim.Optimizer) -> torch.Tensor:
"""
Run a discriminator training step
Args:
batch (Dict): Dictionary containing mocap batch data
body_pose (torch.Tensor): Regressed body pose from current step
betas (torch.Tensor): Regressed betas from current step
optimizer (torch.optim.Optimizer): Discriminator optimizer
Returns:
torch.Tensor: Discriminator loss
"""
batch_size = body_pose.shape[0]
gt_body_pose = batch['body_pose']
gt_betas = batch['betas']
gt_rotmat = aa_to_rotmat(gt_body_pose.view(-1,3)).view(batch_size, -1, 3, 3)
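# Least-squares GAN objective: regressed (fake) poses are pushed toward 0 and
# ground-truth mocap poses toward 1.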
disc_fake_out = self.discriminator(body_pose.detach(), betas.detach())
loss_fake = ((disc_fake_out - 0.0) ** 2).sum() / batch_size
disc_real_out = self.discriminator(gt_rotmat, gt_betas)
loss_real = ((disc_real_out - 1.0) ** 2).sum() / batch_size
loss_disc = loss_fake + loss_real
loss = self.cfg.LOSS_WEIGHTS.ADVERSARIAL * loss_disc
optimizer.zero_grad()
self.manual_backward(loss)
optimizer.step()
return loss_disc.detach()
def training_step(self, joint_batch: Dict, batch_idx: int, optimizer_idx: int) -> Dict:
"""
Run a full training step
Args:
joint_batch (Dict): Dictionary containing image and mocap batch data
batch_idx (int): Unused.
optimizer_idx (int): Unused.
Returns:
Dict: Dictionary containing regression output.
"""
batch = joint_batch['img']
mocap_batch = joint_batch['mocap']
optimizer, optimizer_disc = self.optimizers(use_pl_optimizer=True)
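# Manual optimization: first step the generator (backbone + flow) on the task and
# adversarial losses, then step the discriminator on the mocap batch below.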
batch_size = batch['img'].shape[0]
output = self.forward_step(batch, train=True)
pred_smpl_params = output['pred_smpl_params']
num_samples = pred_smpl_params['body_pose'].shape[1]
pred_smpl_params = output['pred_smpl_params']
loss = self.compute_loss(batch, output, train=True)
disc_out = self.discriminator(pred_smpl_params['body_pose'].reshape(batch_size * num_samples, -1), pred_smpl_params['betas'].reshape(batch_size * num_samples, -1))
loss_adv = ((disc_out - 1.0) ** 2).sum() / batch_size
loss = loss + self.cfg.LOSS_WEIGHTS.ADVERSARIAL * loss_adv
optimizer.zero_grad()
self.manual_backward(loss)
optimizer.step()
loss_disc = self.training_step_discriminator(mocap_batch, pred_smpl_params['body_pose'].reshape(batch_size * num_samples, -1), pred_smpl_params['betas'].reshape(batch_size * num_samples, -1), optimizer_disc)
output['losses']['loss_gen'] = loss_adv
output['losses']['loss_disc'] = loss_disc
if self.global_step > 0 and self.global_step % self.cfg.GENERAL.LOG_STEPS == 0:
self.tensorboard_logging(batch, output, self.global_step, train=True)
return output
def validation_step(self, batch: Dict, batch_idx: int) -> Dict:
"""
Run a validation step and log to Tensorboard
Args:
batch (Dict): Dictionary containing batch data
batch_idx (int): Unused.
Returns:
Dict: Dictionary containing regression output.
"""
batch_size = batch['img'].shape[0]
output = self.forward_step(batch, train=False)
pred_smpl_params = output['pred_smpl_params']
num_samples = pred_smpl_params['body_pose'].shape[1]
loss = self.compute_loss(batch, output, train=False)
output['loss'] = loss
self.tensorboard_logging(batch, output, self.global_step, train=False)
return output
def downstream_optimization(self, regression_output: Dict, batch: Dict, opt_task: OptimizationTask, **kwargs: Any) -> Dict:
"""
Run downstream optimization using current regression output
Args:
regression_output (Dict): Dictionary containing batch data
batch (Dict): Dictionary containing batch data
opt_task (OptimizationTask): Class object for desired optimization task. Must implement __call__ method.
Returns:
Dict: Dictionary containing regression output.
"""
conditioning_feats = regression_output['conditioning_feats']
flow_net = lambda x: self.flow(conditioning_feats, z=x)
return opt_task(flow_net=flow_net,
regression_output=regression_output,
data=batch,
**kwargs)
|
the-stack_106_30252 | from bs4 import BeautifulSoup
from io import BytesIO
import mock
import pytest
from PIL import Image
from django.conf import settings
from capapi.tests.helpers import check_response
from capweb.helpers import reverse, page_image_url
from scripts import update_snippets
@pytest.mark.django_db
def test_nav(client, ingest_case_xml, reporter):
"""
All our navigation links resolve to a 200 OK response.
"""
# this is necessary because some routes need specific snippets now
update_snippets.update_all()
response = client.get(reverse('home'))
check_response(response)
soup = BeautifulSoup(response.content.decode(), 'html.parser')
dropdown_item = soup.find_all('a', {'class': 'dropdown-item'})
for a in dropdown_item:
res = client.get(a.get('href'))
check_response(res)
nav_links = soup.find_all('a', {'class': 'nav-link'})
for a in nav_links:
res = client.get(a.get('href'))
check_response(res)
@pytest.mark.django_db
def test_footer(client):
"""
All our footer links resolve to a 200 OK response.
"""
response = client.get(reverse('home'))
soup = BeautifulSoup(response.content.decode(), 'html.parser')
anchors = soup.find('footer').find_all('a')
for a in anchors:
url = a.get('href')
if settings.PARENT_HOST in url:
res = client.get(url)
check_response(res)
@pytest.mark.django_db
def test_contact(client, auth_client):
response = client.get(reverse('contact'))
soup = BeautifulSoup(response.content.decode(), 'html.parser')
email = soup.find('a', {'class': 'contact_email'})
assert email.get('href').split("mailto:")[1] == settings.DEFAULT_FROM_EMAIL
assert not soup.find('input', {'id': 'id_email'}).get('value')
response = auth_client.get(reverse('contact'))
soup = BeautifulSoup(response.content.decode(), 'html.parser')
assert soup.find('input', {'id': 'id_email'}).get('value') == auth_client.auth_user.email
def test_screenshot(client, live_server, settings, ngrammed_cases):
# set up conditions for /screenshot/ route to work
settings.SCREENSHOT_FEATURE = True
settings.DEBUG = True # so view expects an http url
live_server_port = live_server.url.rsplit(':', 1)[1]
with mock.patch('capweb.views._safe_domains', ['case.test:%s' % live_server_port]):
# url we want a screenshot of -- .graph-container in /trends/?q=the
target_url = reverse('trends', port=live_server_port).replace(':8000', '') + '?q=the'
target_selector = '.graph-container'
# check screenshot
screenshot_url = page_image_url(target_url, targets=[target_selector], timeout=30)
response = client.get(screenshot_url)
check_response(response, content_type="image/png")
# screenshot size doesn't seem to be consistent across host environments?
# width, height = Image.open(BytesIO(response.content)).size
# assert width == 664
# assert height == 400
# check fallback screenshot
screenshot_url = page_image_url(target_url, targets=['.does_not_exist'], timeout=30)
response = client.get(screenshot_url)
check_response(response, content_type="image/jpeg")
# check that we got the default fallback image, api.jpg
width, height = Image.open(BytesIO(response.content)).size
assert width == 1200
assert height == 630
|
the-stack_106_30255 | import numpy as np
from pathlib import Path
import xarray as xr
import pandas as pd
from pyproj import Proj, transform
import pickle as pkl
from autocorr_functions import *
import autocorr_cmls as accml
import sys
sys.path.append("/home/adameshel/Documents/code/")
from helper_functions import *
# Create a dict with aggregation times, time stamps, pars and std errors
# based on the godness of fit to a 2d ACF from a gridded radar.
my_path = Path('/home/adameshel/Documents/code/autocorr/semi_real/main_with_gamma/')
agg_times = ['5T','15T','30T','60T','90T','120T','180T']
raw_path = Path('/home/adameshel/Documents/code/kit_code/\
2d_method_intercomparison/data/raw/')
list_of_GT_datasets = []
ds_radolan = xr.open_mfdataset(
str(raw_path.joinpath('radklim_yw_for_adam.nc').absolute()),
combine='by_coords'
)
start_time_idx = 0#15
end_time_idx = -1#70#340#len(ds_radolan_cut.time)
####### CHANGE DOMAIN ######
## Medium cut
min_lat = 47.6890
min_lon = 8.1873
max_lat = 49.1185
max_lon = 10.0978
ds_radolan_cut = ds_radolan.where((ds_radolan['latitudes'] >= min_lat) &
(ds_radolan['latitudes'] <= max_lat) &
(ds_radolan['longitudes'] >= min_lon) &
(ds_radolan['longitudes'] <= max_lon),
drop=True)
proj_degrees = Proj(init='epsg:4326')
proj_meters = Proj(init='epsg:3043')#3857 FROM MAX #3395 #3043 UTM
# from pyproj import Transformer
x_grid_utm, y_grid_utm = transform(proj_degrees,
proj_meters,
ds_radolan_cut.longitudes.values,
ds_radolan_cut.latitudes.values)
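# Project the lat/lon grid (EPSG:4326) to UTM coordinates (EPSG:3043) so grid positions are
# in meters; presumably this keeps the autocorrelation lags in metric units.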
ds_radolan_cut.coords['x_utm'] = (('y', 'x'), x_grid_utm)
ds_radolan_cut.coords['y_utm'] = (('y', 'x'), y_grid_utm)
time_frame = ds_radolan_cut.time[start_time_idx:end_time_idx]
num_of_ts = len(time_frame)
ds_radolan_GT = ds_radolan_cut.where(ds_radolan_cut.time == \
ds_radolan_cut.time[start_time_idx:end_time_idx])
ds_radolan_GT = ds_radolan_GT.rename({'rainfall_amount':'raindepth'})
rain_mat = ds_radolan_GT.raindepth.values  # multiply by 12 to convert the 5-min depths to mm/h if needed
# rain_mat[rain_mat < 0.1] = 0.0
rain_mat = rain_mat
del ds_radolan_GT
ds_radolan_GT = xr.Dataset(
data_vars={'raindepth': (('time','y', 'x'), rain_mat)},
coords={'lon_grid': (('y', 'x'), ds_radolan_cut.longitudes.values),
'lat_grid': (('y', 'x'), ds_radolan_cut.latitudes.values),
'x_utm': (('y', 'x'), ds_radolan_cut.x_utm.values),
'y_utm': (('y', 'x'), ds_radolan_cut.y_utm.values),
'time': time_frame,
'x': ds_radolan_cut.x.values,
'y': ds_radolan_cut.y.values})
d_run = {}
for at, agg in enumerate(agg_times):
l_pars = []; l_pars_err = []; l_ts = []; l_r2 = []
print(str("ds_radolan_GT_" + agg))
globals()["ds_radolan_GT_" + agg] = ds_radolan_GT.resample(
time=agg, label='right',
restore_coord_dims=False).sum(dim='time')
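# Aggregate the radar rain depths over each window by summing in time; label='right'
# stamps every aggregate with the end time of its window.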
list_of_GT_datasets.append(str("ds_radolan_GT_" + agg))
for ts in range(len(globals()["ds_radolan_GT_" + agg].time.values)):
try:
Z = globals()["ds_radolan_GT_" + agg].raindepth.isel(time=ts).values
if np.sum(np.isnan(Z)) + np.sum(np.isinf(Z))==0:
ac = accml.Autocorr(Z)
ac(optimize=True)
if round(ac.pars[1],1)!=1.5:
r2 = round(np.corrcoef(ac.ac_2d.ravel(),
ac.s.ravel())[1,0],3)**2
if r2>=0.6:
l_pars_err.append(ac.std_error)
l_pars.append(ac.pars)
l_ts.append(ts)
l_r2.append(r2)
print(ts,ac.pars,ac.std_error,'r2=%.3f'%r2)
d_run[agg] = [l_ts,l_pars,l_pars_err,l_r2]
except:
pass
with open(str(my_path.joinpath('d_runTEST.pkl')), 'wb') as f:
pkl.dump(d_run, f) |
the-stack_106_30256 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import iso8601
import mock
from oslo_policy import policy as oslo_policy
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
from nova.api.openstack.compute import instance_actions as instance_actions_v21
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.db.main import models
from nova import exception
from nova import objects
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_server_actions
from nova import utils
FAKE_UUID = fake_server_actions.FAKE_UUID
FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
FAKE_EVENT_ID = fake_server_actions.FAKE_ACTION_ID1
FAKE_REQUEST_NOTFOUND_ID = 'req-' + uuids.req_not_found
def format_action(action, expect_traceback=True, expect_host=False,
expect_hostId=False):
'''Remove keys that aren't serialized.'''
to_delete = ('id', 'finish_time', 'created_at', 'updated_at', 'deleted_at',
'deleted')
for key in to_delete:
if key in action:
del(action[key])
if 'start_time' in action:
# NOTE(danms): Without WSGI above us, these will be just stringified
action['start_time'] = str(action['start_time'].replace(tzinfo=None))
for event in action.get('events', []):
format_event(event, action.get('project_id'),
expect_traceback=expect_traceback,
expect_host=expect_host, expect_hostId=expect_hostId)
return action
def format_event(event, project_id, expect_traceback=True, expect_host=False,
expect_hostId=False):
'''Remove keys that aren't serialized.'''
to_delete = ['id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
'action_id', 'details']
if not expect_traceback:
to_delete.append('traceback')
if not expect_host:
to_delete.append('host')
if not expect_hostId:
to_delete.append('hostId')
for key in to_delete:
if key in event:
del(event[key])
if 'start_time' in event:
# NOTE(danms): Without WSGI above us, these will be just stringified
event['start_time'] = str(event['start_time'].replace(tzinfo=None))
if 'finish_time' in event:
# NOTE(danms): Without WSGI above us, these will be just stringified
event['finish_time'] = str(event['finish_time'].replace(tzinfo=None))
return event
class InstanceActionsTestV21(test.NoDBTestCase):
instance_actions = instance_actions_v21
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
expect_events_non_admin = False
expect_event_hostId = False
expect_event_host = False
def fake_get(self, context, instance_uuid, expected_attrs=None,
cell_down_support=False):
return objects.Instance(
context, id=1, uuid=instance_uuid, project_id=context.project_id)
def setUp(self):
super(InstanceActionsTestV21, self).setUp()
self.controller = self.instance_actions.InstanceActionsController()
self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
get_patcher = mock.patch.object(compute_api.API, 'get',
side_effect=self.fake_get)
self.addCleanup(get_patcher.stop)
self.mock_get = get_patcher.start()
def _get_http_req(self, action, use_admin_context=False):
fake_url = '/%s/servers/12/%s' % (fakes.FAKE_PROJECT_ID, action)
return fakes.HTTPRequest.blank(fake_url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
def _get_http_req_with_version(self, action, use_admin_context=False,
version="2.21"):
fake_url = '/%s/servers/12/%s' % (fakes.FAKE_PROJECT_ID, action)
return fakes.HTTPRequest.blank(fake_url,
use_admin_context=use_admin_context,
version=version)
def _set_policy_rules(self):
rules = {'compute:get': '',
'os_compute_api:os-instance-actions:show': '',
'os_compute_api:os-instance-actions:events': 'is_admin:True'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
def test_list_actions(self):
def fake_get_actions(context, uuid, limit=None, marker=None,
filters=None):
actions = []
for act in self.fake_actions[uuid].values():
action = models.InstanceAction()
action.update(act)
actions.append(action)
return actions
self.stub_out('nova.db.main.api.actions_get', fake_get_actions)
req = self._get_http_req('os-instance-actions')
res_dict = self.controller.index(req, FAKE_UUID)
for res in res_dict['instanceActions']:
fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
self.assertEqual(format_action(fake_action), format_action(res))
def test_get_action_with_events_allowed(self):
def fake_get_action(context, uuid, request_id):
action = models.InstanceAction()
action.update(self.fake_actions[uuid][request_id])
return action
def fake_get_events(context, action_id):
events = []
for evt in self.fake_events[action_id]:
event = models.InstanceActionEvent()
event.update(evt)
events.append(event)
return events
self.stub_out(
'nova.db.main.api.action_get_by_request_id', fake_get_action)
self.stub_out('nova.db.main.api.action_events_get', fake_get_events)
req = self._get_http_req('os-instance-actions/1',
use_admin_context=True)
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
fake_events = self.fake_events[fake_action['id']]
fake_action['events'] = fake_events
self.assertEqual(format_action(fake_action,
expect_host=self.expect_event_host,
expect_hostId=self.expect_event_hostId),
format_action(res_dict['instanceAction'],
expect_host=self.expect_event_host,
expect_hostId=self.expect_event_hostId))
def test_get_action_with_events_not_allowed(self):
def fake_get_action(context, uuid, request_id):
return self.fake_actions[uuid][request_id]
def fake_get_events(context, action_id):
return self.fake_events[action_id]
self.stub_out(
'nova.db.main.api.action_get_by_request_id', fake_get_action)
self.stub_out('nova.db.main.api.action_events_get', fake_get_events)
self._set_policy_rules()
req = self._get_http_req('os-instance-actions/1')
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
if self.expect_events_non_admin:
fake_event = fake_server_actions.FAKE_EVENTS[FAKE_EVENT_ID]
fake_action['events'] = copy.deepcopy(fake_event)
# By default, non-admins are not allowed to see traceback details
# and event host.
self.assertEqual(format_action(fake_action,
expect_traceback=False,
expect_host=False,
expect_hostId=self.expect_event_hostId),
format_action(res_dict['instanceAction'],
expect_traceback=False,
expect_host=False,
expect_hostId=self.expect_event_hostId))
def test_action_not_found(self):
def fake_no_action(context, uuid, action_id):
return None
self.stub_out(
'nova.db.main.api.action_get_by_request_id', fake_no_action)
req = self._get_http_req('os-instance-actions/1')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, FAKE_REQUEST_ID)
def test_index_instance_not_found(self):
self.mock_get.side_effect = exception.InstanceNotFound(
instance_id=FAKE_UUID)
req = self._get_http_req('os-instance-actions')
self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
FAKE_UUID)
self.mock_get.assert_called_once_with(req.environ['nova.context'],
FAKE_UUID, expected_attrs=None,
cell_down_support=False)
def test_show_instance_not_found(self):
self.mock_get.side_effect = exception.InstanceNotFound(
instance_id=FAKE_UUID)
req = self._get_http_req('os-instance-actions/fake')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, 'fake')
self.mock_get.assert_called_once_with(req.environ['nova.context'],
FAKE_UUID, expected_attrs=None,
cell_down_support=False)
class InstanceActionsTestV221(InstanceActionsTestV21):
wsgi_api_version = "2.21"
def fake_get(self, context, instance_uuid, expected_attrs=None,
cell_down_support=False):
self.assertEqual('yes', context.read_deleted)
return objects.Instance(
context, id=1, uuid=instance_uuid, project_id=context.project_id)
class InstanceActionsTestV251(InstanceActionsTestV221):
wsgi_api_version = "2.51"
expect_events_non_admin = True
class InstanceActionsTestV258(InstanceActionsTestV251):
wsgi_api_version = "2.58"
@mock.patch('nova.objects.InstanceActionList.get_by_instance_uuid')
def test_get_action_with_invalid_marker(self, mock_actions_get):
"""Tests detail paging with an invalid marker (not found)."""
mock_actions_get.side_effect = exception.MarkerNotFound(
marker=FAKE_REQUEST_NOTFOUND_ID)
req = self._get_http_req('os-instance-actions?'
'marker=%s' % FAKE_REQUEST_NOTFOUND_ID)
self.assertRaises(exc.HTTPBadRequest,
self.controller.index, req, FAKE_UUID)
def test_get_action_with_invalid_limit(self):
"""Tests get paging with an invalid limit."""
req = self._get_http_req('os-instance-actions?limit=x')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
req = self._get_http_req('os-instance-actions?limit=-1')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_action_with_invalid_change_since(self):
"""Tests get paging with a invalid change_since."""
req = self._get_http_req('os-instance-actions?'
'changes-since=wrong_time')
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Invalid input for query parameters changes-since',
str(ex))
def test_get_action_with_invalid_params(self):
"""Tests get paging with a invalid change_since."""
req = self._get_http_req('os-instance-actions?'
'wrong_params=xxx')
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Additional properties are not allowed', str(ex))
def test_get_action_with_multi_params(self):
"""Tests get paging with multi markers."""
req = self._get_http_req('os-instance-actions?marker=A&marker=B')
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Invalid input for query parameters marker', str(ex))
class InstanceActionsTestV262(InstanceActionsTestV258):
wsgi_api_version = "2.62"
expect_event_hostId = True
expect_event_host = True
instance_project_id = '26cde4489f6749a08834741678df3c4a'
def fake_get(self, context, instance_uuid, expected_attrs=None,
cell_down_support=False):
return objects.Instance(uuid=instance_uuid,
project_id=self.instance_project_id)
@mock.patch.object(compute_api.InstanceActionAPI, 'action_events_get')
@mock.patch.object(compute_api.InstanceActionAPI,
'action_get_by_request_id')
def test_get_action_with_events_project_id_none(self, mock_action_get,
mock_action_events):
fake_request_id = 'req-%s' % uuids.req1
mock_action_get.return_value = objects.InstanceAction(
id=789,
action='stop',
instance_uuid=uuids.instance,
request_id=fake_request_id,
user_id=None,
project_id=None,
start_time=datetime.datetime(2019, 2, 28, 14, 28, 0, 0),
finish_time=None,
message='',
created_at=None,
updated_at=None,
deleted_at=None,
deleted=False)
mock_action_events.return_value = [
objects.InstanceActionEvent(
id=5,
action_id=789,
event='compute_stop_instance',
start_time=datetime.datetime(2019, 2, 28, 14, 28, 0, 0),
finish_time=datetime.datetime(2019, 2, 28, 14, 30, 0, 0),
result='Success',
traceback='',
created_at=None,
updated_at=None,
deleted_at=None,
deleted=False,
host='host2')]
req = self._get_http_req('os-instance-actions/1',
use_admin_context=True)
res_dict = self.controller.show(req, uuids.instance, fake_request_id)
# Assert that 'project_id' is null (None) in the response
self.assertIsNone(res_dict['instanceAction']['project_id'])
self.assertEqual('host2',
res_dict['instanceAction']['events'][0]['host'])
# Assert that the 'hostId' is based on 'host' and the project ID
# of the server
self.assertEqual(utils.generate_hostid(
res_dict['instanceAction']['events'][0]['host'],
self.instance_project_id),
res_dict['instanceAction']['events'][0]['hostId'])
class InstanceActionsTestV266(InstanceActionsTestV258):
wsgi_api_version = "2.66"
def test_get_action_with_invalid_changes_before(self):
"""Tests get paging with a invalid changes-before."""
req = self._get_http_req('os-instance-actions?'
'changes-before=wrong_time')
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Invalid input for query parameters changes-before',
str(ex))
@mock.patch('nova.compute.api.InstanceActionAPI.actions_get')
@mock.patch('nova.api.openstack.common.get_instance')
def test_get_action_with_changes_since_and_changes_before(
self, mock_get_instance, mock_action_get):
param = 'changes-since=2012-12-05T00:00:00Z&' \
'changes-before=2012-12-05T01:00:00Z'
req = self._get_http_req_with_version('os-instance-actions?%s' %
param, use_admin_context=True,
version=self.wsgi_api_version)
instance = fake_instance.fake_instance_obj(req.environ['nova.context'])
mock_get_instance.return_value = instance
self.controller.index(req, FAKE_UUID)
filters = {'changes-since': datetime.datetime(
2012, 12, 5, 0, 0, tzinfo=iso8601.iso8601.UTC),
'changes-before': datetime.datetime(
2012, 12, 5, 1, 0, tzinfo=iso8601.iso8601.UTC)}
mock_action_get.assert_called_once_with(req.environ['nova.context'],
instance, limit=1000,
marker=None,
filters=filters)
def test_instance_actions_filters_with_distinct_changes_time_bad_request(
self):
changes_since = '2018-09-04T05:45:27Z'
changes_before = '2018-09-03T05:45:27Z'
req = self._get_http_req('os-instance-actions?'
'changes-since=%s&changes-before=%s' %
(changes_since, changes_before))
ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index,
req, FAKE_UUID)
self.assertIn('The value of changes-since must be less than '
'or equal to changes-before', str(ex))
def test_get_action_with_changes_before_old_microversion(self):
"""Tests that the changes-before query parameter is an error before
microversion 2.66.
"""
param = 'changes-before=2018-09-13T15:13:03Z'
req = self._get_http_req_with_version('os-instance-actions?%s' %
param, use_admin_context=True,
version="2.65")
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
detail = 'Additional properties are not allowed'
self.assertIn(detail, str(ex))
class InstanceActionsTestV284(InstanceActionsTestV266):
wsgi_api_version = "2.84"
def _set_policy_rules(self, overwrite=True):
rules = {'os_compute_api:os-instance-actions:show': '',
'os_compute_api:os-instance-actions:events:details':
'project_id:%(project_id)s'}
policy.set_rules(oslo_policy.Rules.from_dict(rules),
overwrite=overwrite)
def test_show_action_with_details(self):
def fake_get_action(context, uuid, request_id):
return self.fake_actions[uuid][request_id]
def fake_get_events(context, action_id):
return self.fake_events[action_id]
self.stub_out(
'nova.db.main.api.action_get_by_request_id', fake_get_action)
self.stub_out('nova.db.main.api.action_events_get', fake_get_events)
self._set_policy_rules(overwrite=False)
req = self._get_http_req('os-instance-actions/1')
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
for event in res_dict['instanceAction']['events']:
self.assertIn('details', event)
def test_show_action_with_details_old_microversion(self):
"""Before microversion 2.84, we cannot get the details in events."""
def fake_get_action(context, uuid, request_id):
return self.fake_actions[uuid][request_id]
def fake_get_events(context, action_id):
return self.fake_events[action_id]
self.stub_out(
'nova.db.main.api.action_get_by_request_id', fake_get_action)
self.stub_out('nova.db.main.api.action_events_get', fake_get_events)
req = self._get_http_req_with_version('os-instance-actions/1',
version="2.83")
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
for event in res_dict['instanceAction']['events']:
self.assertNotIn('details', event)
|
the-stack_106_30258 | # SPDX-License-Identifier: Apache-2.0
# This file is for testing ONNX with ONNXRuntime during ONNX Release
# Create a general scenario to use ONNXRuntime with ONNX
def example_test_with_ort() -> None:
import onnx
import numpy # type: ignore
import onnxruntime as rt # type: ignore
from onnxruntime.datasets import get_example # type: ignore
import numpy.random # type: ignore
# get certain example model from ORT
example1 = get_example("sigmoid.onnx")
# test ONNX functions
model = onnx.load(example1)
onnx.checker.check_model(model)
onnx.checker.check_model(model, True)
inferred_model = onnx.shape_inference.infer_shapes(model, True)
temp_filename = "temp.onnx"
onnx.save(inferred_model, temp_filename)
# test ONNXRuntime functions
sess = rt.InferenceSession(temp_filename)
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name
x = numpy.random.random((3, 4, 5))
x = x.astype(numpy.float32)
sess.run([output_name], {input_name: x})
if __name__ == "__main__":
example_test_with_ort()
|
the-stack_106_30259 | import datetime
from .args import (
build_argparser,
get_students_from_args,
get_assignments_from_args,
compute_stogit_url,
)
def args(arglist):
return vars(build_argparser().parse_args(args=arglist))
students = {
'my': ['rives'],
'section-a': ['student-a'],
'section-b': ['student-b'],
}
def test_all():
# check that --all includes all students
assert get_students_from_args(**args(['--all']), _all_students=students) == students['my'] + students['section-a'] + students['section-b']
def test_students():
# multiple sets of --students should wind up as one flattened list
assert get_students_from_args(**args(['--students', 'a', 'b', '--students', 'c']), _all_students=students) == ['a', 'b', 'c']
# it should return a sorted list of student names
assert get_students_from_args(**args(['--students', 'c', 'b', '--students', 'a']), _all_students=students) == ['a', 'b', 'c']
# multiple occurences of the same student should be removed
assert get_students_from_args(**args(['--students', 'a', 'a', '--students', 'a']), _all_students=students) == ['a']
# if no students are given, it should default to the "my" section
assert get_students_from_args(**args([]), _all_students=students) == students['my']
def test_section():
# "--section $name" should return the students for that section
assert get_students_from_args(**args(['--section', 'a']), _all_students=students) == students['section-a']
def test_record():
assert get_assignments_from_args(**args(['--record', 'hw4'])) == ['hw4']
def test_stogit_url_computation():
assert compute_stogit_url(stogit=None, course='sd', _now=datetime.date(2017, 1, 31)) \
== '[email protected]:sd-s17'
assert compute_stogit_url(stogit=None, course='sd', _now=datetime.date(2016, 9, 15)) \
== '[email protected]:sd-f16'
assert compute_stogit_url(stogit=None, course='sd', _now=datetime.date(2016, 4, 15)) \
== '[email protected]:sd-s16'
assert compute_stogit_url(stogit='blah', course='sd', _now=datetime.date.today()) \
== 'blah'
assert compute_stogit_url(stogit=None, course='hd', _now=datetime.date(2016, 4, 15)) \
== '[email protected]:hd-s16'
|
the-stack_106_30260 | import asyncio
from asyncio.events import AbstractEventLoop
from typing import Optional
import pytest
from temporal.converter import DEFAULT_DATA_CONVERTER_INSTANCE
from temporal.workerfactory import WorkerFactory
from temporal.workflow import WorkflowClient
from . import cleanup_worker
loop: Optional[AbstractEventLoop] = None
@pytest.fixture
def event_loop():
global loop
if not loop:
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture
async def worker(request):
marker = request.node.get_closest_marker("worker_config")
namespace = marker.args[0]
task_queue = marker.args[1]
activities = marker.kwargs.get("activities", [])
workflows = marker.kwargs.get("workflows", [])
data_converter = marker.kwargs.get(
"data_converter", DEFAULT_DATA_CONVERTER_INSTANCE
)
client: WorkflowClient = WorkflowClient.new_client(
"localhost", 7233, data_converter=data_converter
)
factory = WorkerFactory(client, namespace)
worker_instance = factory.new_worker(task_queue)
for a_instance, a_cls in activities:
worker_instance.register_activities_implementation(a_instance, a_cls)
for w in workflows:
worker_instance.register_workflow_implementation_type(w)
factory.start()
yield worker_instance
asyncio.create_task(cleanup_worker(client, worker_instance))
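# Illustrative use of this fixture (activity/workflow names below are placeholders, not part
# of this repository):
#
# @pytest.mark.asyncio
# @pytest.mark.worker_config("test-namespace", "test-task-queue",
#                            activities=[(MyActivitiesImpl(), MyActivities)],
#                            workflows=[MyWorkflowImpl])
# async def test_example(worker):
#     ...  # start a workflow against "test-task-queue" and assert on its result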
|
the-stack_106_30261 | from urllib.request import urlopen
protein_seq_file = 'data/newUpdated_protein_R64-3.fsa'
protparam_file = 'data/newUpdated_protparam_R64-3.txt'
codonw_file = 'data/newUpdated_coding_codonw_R64-3.out'
protparam_root_url = 'https://web.expasy.org/cgi-bin/protparam/protparam?sequence='
aaList = ['ala', 'arg', 'asn', 'asp', 'cys', 'gln', 'glu', 'gly', 'his', 'ile', 'leu', 'lys', 'met', 'phe', 'pro', 'ser', 'thr', 'trp', 'tyr','val']
def retrieve_data():
f = open(codonw_file)
key2codonw = {}
for line in f:
if line.startswith('title'):
continue
pieces = line.strip().split('\t')
if len(pieces) > 14:
key = pieces[0].strip()
key2codonw[key] = (pieces[5], pieces[6], pieces[7], pieces[13], pieces[14])
f.close()
f = open(protein_seq_file)
fw = open(protparam_file, "w")
name = None
seq = ''
isFirst = 1
for line in f:
line = line.strip()
if line.startswith('>'):
if name and seq:
process_data(fw, name, seq, key2codonw.get(name), isFirst)
# print (">", name, "\t", seq, "\t", key2codonw.get(name))
seq = ''
isFirst = 0
name = line.replace('>', '')
else:
seq = seq + line
process_data(fw, name, seq, key2codonw.get(name), isFirst)
# print (">", name, "\t", seq, "\t", key2codonw.get(name))
f.close()
fw.close()
def process_data(fw, name, seq, key, isFirst):
seq = seq.replace('*', '')
protein_length = len(seq)
n_term_seq = seq[0:7]
c_term_seq = seq[-7:]
url = protparam_root_url+seq
(cai, cbi, fop, gravy, aromo) = key
response = urlopen(url)
html = response.read().decode('utf-8')
lines = html.split("\n")
mw = None
pI = None
aa2value = {}
atom2composition = {}
formStart = 0
no_cys_ext_coeff = 0
all_cys_ext_coeff = 0
ext_coeff = None
instability_index = None
aliphatic_index = None
for line in lines:
if line.startswith("<B>Molecular weight:</B> "):
mw = line.replace("<B>Molecular weight:</B> ", '')
continue
if line.startswith("<B>Theoretical pI:</B> "):
pI = line.replace("<B>Theoretical pI:</B> ", '')
continue
if "<form " in line:
formStart = 1
if "</form>" in line:
formStart = 0
if formStart == 1:
pieces = line.split('>')[-1].split(' ')
aa = pieces[0].lower()
for x in pieces:
if x == '':
continue
x = x.replace("\t", "")
if x.isdigit():
aa2value[aa] = x
for atom in ['Carbon', 'Hydrogen', 'Nitrogen', 'Oxygen', 'Sulfur']:
if line.startswith(atom):
pieces = line.split(' ')
atom2composition[atom] = pieces[-1]
if line.startswith('Ext. coefficient'):
pieces = line.split(' ')
ext_coeff = pieces[-1]
continue
if "assuming all pairs of Cys residues form cystines" in line:
all_cys_ext_coeff = ext_coeff
continue
if "assuming all Cys residues are reduced" in line:
no_cys_ext_coeff = ext_coeff
continue
if line.startswith("The instability index (II) is computed to be "):
instability_index = line.replace("The instability index (II) is computed to be ", '')
continue
if line.startswith("<B>Aliphatic index:</B> "):
aliphatic_index = line.replace("<B>Aliphatic index:</B> ", '')
if isFirst == 1:
# print header
fw.write("name\tprotein_length\tn_term_seq\tc_term_seq\tMW\tpI\t")
for aa in aaList:
fw.write(aa+"\t")
for atom in ['Carbon', 'Hydrogen', 'Nitrogen', 'Oxygen', 'Sulfur']:
fw.write(atom+"\t")
fw.write("instability_index\taliphatic_index\tcai\tcodon_bias\tfop_score\tgravy_score\taromaticity_score\tno_cys_ext_coeff\tall_cys_ext_coeff\n")
protein_length = len(seq)
n_term_seq = seq[0:7]
c_term_seq = seq[-7:]
cai = str(round(float(cai), 2))
cbi = str(round(float(cbi), 2))
fop = str(round(float(fop), 2))
gravy = str(round(float(gravy), 2))
aromo = str(round(float(aromo), 2))
# print data
fw.write(name + "\t" + str(protein_length) + "\t")
fw.write(n_term_seq + "\t" + c_term_seq + "\t" + mw + "\t" + pI + "\t")
for aa in aaList:
fw.write(str(aa2value.get(aa))+"\t")
for atom in ['Carbon', 'Hydrogen', 'Nitrogen', 'Oxygen', 'Sulfur']:
fw.write(str(atom2composition.get(atom))+"\t")
fw.write(str(instability_index) + "\t" + str(aliphatic_index) + "\t" + str(cai) + "\t" + str(cbi) + "\t" + str(fop) + "\t" + str(gravy) + "\t" + str(aromo) + "\t" + str(no_cys_ext_coeff) + "\t" + str(all_cys_ext_coeff) + "\n")
if __name__ == "__main__":
retrieve_data()
|
the-stack_106_30262 | """
Component to offer a way to select an option from a list.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/input_select/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, CONF_ICON, CONF_NAME
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import async_get_last_state
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'input_select'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_INITIAL = 'initial'
CONF_OPTIONS = 'options'
ATTR_OPTION = 'option'
ATTR_OPTIONS = 'options'
SERVICE_SELECT_OPTION = 'select_option'
SERVICE_SELECT_OPTION_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_OPTION): cv.string,
})
SERVICE_SELECT_NEXT = 'select_next'
SERVICE_SELECT_NEXT_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SERVICE_SELECT_PREVIOUS = 'select_previous'
SERVICE_SELECT_PREVIOUS_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SERVICE_SET_OPTIONS = 'set_options'
SERVICE_SET_OPTIONS_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_OPTIONS):
vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]),
})
def _cv_input_select(cfg):
"""Configure validation helper for input select (voluptuous)."""
options = cfg[CONF_OPTIONS]
initial = cfg.get(CONF_INITIAL)
if initial is not None and initial not in options:
raise vol.Invalid('initial state "{}" is not part of the options: {}'
.format(initial, ','.join(options)))
return cfg
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: vol.All({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_OPTIONS):
vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]),
vol.Optional(CONF_INITIAL): cv.string,
vol.Optional(CONF_ICON): cv.icon,
}, _cv_input_select)})
}, required=True, extra=vol.ALLOW_EXTRA)
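# Example configuration.yaml entry matching the schema above (illustrative values):
#
# input_select:
#   living_room_preset:
#     name: Living room preset
#     options:
#       - Visitors
#       - Home alone
#     initial: Home alone
#     icon: mdi:target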
@bind_hass
def select_option(hass, entity_id, option):
"""Set value of input_select."""
hass.services.call(DOMAIN, SERVICE_SELECT_OPTION, {
ATTR_ENTITY_ID: entity_id,
ATTR_OPTION: option,
})
@bind_hass
def select_next(hass, entity_id):
"""Set next value of input_select."""
hass.services.call(DOMAIN, SERVICE_SELECT_NEXT, {
ATTR_ENTITY_ID: entity_id,
})
@bind_hass
def select_previous(hass, entity_id):
"""Set previous value of input_select."""
hass.services.call(DOMAIN, SERVICE_SELECT_PREVIOUS, {
ATTR_ENTITY_ID: entity_id,
})
@bind_hass
def set_options(hass, entity_id, options):
"""Set options of input_select."""
hass.services.call(DOMAIN, SERVICE_SET_OPTIONS, {
ATTR_ENTITY_ID: entity_id,
ATTR_OPTIONS: options,
})
@asyncio.coroutine
def async_setup(hass, config):
"""Set up an input select."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
name = cfg.get(CONF_NAME)
options = cfg.get(CONF_OPTIONS)
initial = cfg.get(CONF_INITIAL)
icon = cfg.get(CONF_ICON)
entities.append(InputSelect(object_id, name, initial, options, icon))
if not entities:
return False
component.async_register_entity_service(
SERVICE_SELECT_OPTION, SERVICE_SELECT_OPTION_SCHEMA,
'async_select_option'
)
component.async_register_entity_service(
SERVICE_SELECT_NEXT, SERVICE_SELECT_NEXT_SCHEMA,
lambda entity, call: entity.async_offset_index(1)
)
component.async_register_entity_service(
SERVICE_SELECT_PREVIOUS, SERVICE_SELECT_PREVIOUS_SCHEMA,
lambda entity, call: entity.async_offset_index(-1)
)
component.async_register_entity_service(
SERVICE_SET_OPTIONS, SERVICE_SET_OPTIONS_SCHEMA,
'async_set_options'
)
yield from component.async_add_entities(entities)
return True
class InputSelect(Entity):
"""Representation of a select input."""
def __init__(self, object_id, name, initial, options, icon):
"""Initialize a select input."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self._current_option = initial
self._options = options
self._icon = icon
@asyncio.coroutine
def async_added_to_hass(self):
"""Run when entity about to be added."""
if self._current_option is not None:
return
state = yield from async_get_last_state(self.hass, self.entity_id)
if not state or state.state not in self._options:
self._current_option = self._options[0]
else:
self._current_option = state.state
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_option
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_OPTIONS: self._options,
}
@asyncio.coroutine
def async_select_option(self, option):
"""Select new option."""
if option not in self._options:
_LOGGER.warning('Invalid option: %s (possible options: %s)',
option, ', '.join(self._options))
return
self._current_option = option
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_offset_index(self, offset):
"""Offset current index."""
current_index = self._options.index(self._current_option)
new_index = (current_index + offset) % len(self._options)
self._current_option = self._options[new_index]
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_set_options(self, options):
"""Set options."""
self._current_option = options[0]
self._options = options
yield from self.async_update_ha_state()
|
the-stack_106_30263 | import gtk
import gobject
import logging
import wasp.fms
LOG = logging.getLogger("ui.control")
def _make_left_fancy_label(txt, use_markup=True, padding=5):
lbl = gtk.Label(txt)
lbl.set_use_markup(use_markup)
lbl.set_alignment(0.0, 0.5)
lbl.set_padding(padding, 0)
return lbl
class _FMSAxisWidget(gtk.HBox):
""" An enable checkbox, label and value for each FMS axis """
def __init__(self, sizegroup, name, _id, toggled_cb, enabled=True):
gtk.HBox.__init__(self)
cb = gtk.CheckButton()
cb.set_active(enabled)
cb.connect("toggled", toggled_cb, _id)
cb.toggled()
self.pack_start(cb, False, False)
lbl = _make_left_fancy_label("<i>%s :</i>" % name)
sizegroup.add_widget(lbl)
self.pack_start(lbl, False, False)
self._lbl = _make_left_fancy_label("", False, 0)
self.pack_start(self._lbl, True, True)
def set_axis_value(self, value):
self._lbl.set_text(str(value))
class ControlController:
def __init__(self, source, messages_file, settings_file):
self._source = source
self._messages_file = messages_file
self._fms_control = wasp.fms.ControlManager(source, messages_file, settings_file)
#build the UI
self.widget = gtk.VBox()
self._control_widget = gtk.VBox(spacing=5)
self.widget.pack_start(self._control_widget, True, True)
self.widget.pack_start(gtk.HSeparator(), False, False)
self._status_widget = gtk.Label("")
self._status_widget.props.xalign = 0.0
self._status_widget.props.xpad = 5
self.widget.pack_start(self._status_widget, False, True)
gobject.timeout_add(1000/10, self._refresh_label)
#FMS mode
hb = gtk.HBox()
lbl = _make_left_fancy_label("<b>FMS Mode: </b>")
hb.pack_start(lbl, False, False)
self._fms_mode_label = _make_left_fancy_label("", False, 0)
hb.pack_start(self._fms_mode_label, True, True)
self.widget.pack_start(hb, False, True)
#each axis gets a widget that manages its value and enabled state
sg = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
self._axis_id_widget_map = {}
for _id in wasp.fms.ID_LIST_FMS_ATTITUDE:
widget = _FMSAxisWidget(sg, wasp.fms.ID_NAMES[_id], _id, self._fms_axis_enable_toggled)
self.widget.pack_start(widget, False, True)
self._axis_id_widget_map[_id] = widget
def _fms_axis_enable_toggled(self, btn, _id):
if btn.props.active:
self._fms_control.enable_axis(_id)
else:
self._fms_control.disable_axis(_id)
def _update_fms_axis_value_labels(self, enabled, name=None, sp=None):
if enabled:
self._fms_mode_label.set_text (name)
for _id in wasp.fms.ID_LIST_FMS_ATTITUDE:
self._axis_id_widget_map[_id].set_axis_value(sp[_id])
else:
self._fms_mode_label.set_text ("Disabled")
for _id in wasp.fms.ID_LIST_FMS_ATTITUDE:
self._axis_id_widget_map[_id].set_axis_value("")
def _refresh_label(self):
try:
name, sp = self._fms_control.get_mode_and_setpoints()
self._update_fms_axis_value_labels(True, name, sp)
except TypeError:
#no fms enalbed
self._update_fms_axis_value_labels(False)
return True
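# Toggling a control's checkbox unlocks (or locks) its widget, notifies the control of its
# new enabled state, and switches the shared FMS control manager on or off accordingly.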
def _on_enabled(self, btn, widget, control_widget):
enabled = btn.get_active()
widget.set_sensitive(enabled)
control_widget.set_control_enabled(enabled, self._fms_control)
self._fms_control.enable(enabled)
def add_control_widget(self, name, control_widget):
#Each control widget is in a frame with a label and nice padding.
#to the right lies an unlock button that must be clicked to make the
#widget sensitive
widget = control_widget.get_ui_widget()
b = gtk.CheckButton()
l = gtk.Label("<b>Enable %s</b>" % name)
l.props.use_markup = True
h = gtk.HBox()
h.pack_start(b, False, False)
h.pack_start(l, True, True)
f = gtk.Frame()
f.props.shadow_type = gtk.SHADOW_NONE
f.props.label_widget = h
a = gtk.Alignment()
a.set_padding(5,0,10,0)
f.add(a)
b.connect("toggled", self._on_enabled, widget, control_widget)
hb = gtk.HBox(spacing=5)
#make widget unsensitive by default
widget.set_sensitive(False)
hb.pack_start(widget, True, True)
a.add(hb)
f.show_all()
self._control_widget.pack_start(f, False, True)
class ControlWidgetIface:
fms_control = None
def set_control_enabled(self, enabled, fms_control):
raise NotImplementedError
def get_ui_widget(self):
raise NotImplementedError
|
the-stack_106_30265 | """
intro_3_05_variable_names_1.py
Example code from Section 3.5 of <Introduction to SFC Models Using Python.>
Demonstration how variable names are built up.
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Imports
# This next line looks bizarre, but is needed for backwards compatibility with Python 2.7.
from __future__ import print_function
import sfc_models
from sfc_models.models import Model, Country
from sfc_models.sector import Sector
sfc_models.register_standard_logs(output_dir='output',
base_file_name=__file__)
mod = Model()
can = Country(mod, 'CA', 'Canada')
# has_F=False: turns off creation of financial asset variables.
sector_yy = Sector(can, 'YY', has_F=False)
sector_yy.AddVariable('W', 'Variable W <constant>', '4.0')
sector_yy.AddVariable('Y', 'Variable Y - depends on local variable', '2*W')
sector_xx = Sector(can, 'XX', has_F=False)
variable_name = sector_yy.GetVariableName('Y')
# format: inserts variable_name where {0} is
eqn = '{0} + 2.0'.format(variable_name)
sector_xx.AddVariable('X', 'Variable x; depends on other sector', eqn)
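# GetVariableName returned the fully qualified (sector-prefixed) name of Y, which is what
# lets the equation string registered in sector XX refer to a variable owned by sector YY.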
# Bind the model; solve
eqns = mod.main()
print(eqns)
mod.main()
|
the-stack_106_30266 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains the user-facing API for AutoGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import inspect
import os
import pdb
import re
import sys
import textwrap
import traceback
from enum import Enum
# pylint:disable=g-bad-import-order
import six
# pylint:enable=g-bad-import-order
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.framework import errors_impl
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.python.util.tf_export import tf_export
def is_autograph_strict_conversion_mode():
return int(os.environ.get('AUTOGRAPH_STRICT_CONVERSION', '0')) > 0
# TODO(mdan): Export this symbol.
class AutoGraphError(Exception):
"""Base class for all AutoGraph exceptions."""
pass
class ConversionError(AutoGraphError):
"""Raised during the conversion process."""
pass
class StagingError(AutoGraphError):
"""Raised during the staging (i.e. Python execution) of converted code."""
pass
class _ErrorMetadata(errors.ErrorMetadataBase):
"""AutoGraph-specific error metadata. See base class."""
def create_exception(self, preferred_type):
if preferred_type == errors_impl.OpError:
# Best-effort unpacking of OpError exceptions.
# TODO(mdan): Use a mechanism that is more future-proof.
t = type(self.cause)
init_argspec = tf_inspect.getfullargspec(t.__init__)
message = self.get_message()
      init_args = tuple(init_argspec.args)
# At the time of this writing, TF errors either take 3 or 4 arguments,
# with the fourth being error_code.
if init_args == ('self', 'node_def', 'op', 'message', 'error_code'):
return t(
node_def=self.cause.node_def,
op=self.cause.op,
message=message,
error_code=self.error_code)
elif init_args == ('self', 'node_def', 'op', 'message'):
if 'error_code' in init_argspec.kwonlyargs:
return t(
node_def=self.cause.node_def,
op=self.cause.op,
message=message,
              error_code=self.error_code)
else:
return t(
node_def=self.cause.node_def, op=self.cause.op, message=message)
elif preferred_type in (AutoGraphError, ConversionError, StagingError,
errors_impl.InaccessibleTensorError,
errors_impl.OperatorNotAllowedInGraphError):
return preferred_type(self.get_message())
exc = super(_ErrorMetadata, self).create_exception(preferred_type)
if exc is not None:
return exc
# Note: While changing an error's message property to change the message it
# displays will probably work a lot of times, there is no standard way in
# Python to do that. The safest way is therefore to create a new exception.
# For user defined exceptions, we could define an interface that allowed
# them to work under this mechanism.
return StagingError(self.get_message())
class StackTraceMapper(tf_stack.StackTraceMapper):
"""Remaps generated code to code it originated from."""
def __init__(self, converted_fn):
self._source_map = converted_fn.ag_source_map
def map(self, filename, lineno, name):
loc = origin_info.LineLocation(filename=filename, lineno=lineno)
if loc not in self._source_map:
return filename, lineno, name
origin = self._source_map[loc]
return origin.loc.filename, origin.loc.lineno, origin.function_name
def tf_convert(f, ctx, convert_by_default=True, force_conversion=False):
"""Decorator that applies AutoGraph to a function.
Use in internal APIs.
This API is suitable for high order functions internal to the TensorFlow API,
and more generally any function to which Autograph is not applied.
Guidance: convert was a decorator meant for use directly by developers, and
  will soon be deprecated in favor of tf.function. tf_convert is to be called
from high order functions internal to TF.
Args:
f: Callable.
ctx: ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used.
convert_by_default: bool, whether to use AutoGraph when the context doesn't
specify.
force_conversion: bool, whether to ignore the conversion whitelist. See
ConversionOptions.force_conversion.
Returns:
    Either `f` or the converted version of `f`.
"""
if hasattr(f, '__ag_compiled'):
return f
f_wrapper = f
decorators, f = tf_decorator.unwrap(f)
# TODO(mdan): Grab features from context.
if ctx.status == ag_ctx.Status.ENABLED:
wrapper = convert(recursive=True, force_conversion=force_conversion)(f)
elif ctx.status == ag_ctx.Status.DISABLED:
wrapper = do_not_convert(f)
elif ctx.status == ag_ctx.Status.UNSPECIFIED:
if convert_by_default:
wrapper = convert(recursive=True, force_conversion=force_conversion)(f)
else:
wrapper = call_with_unspecified_conversion_status(f)
else:
raise ValueError(ctx.status)
if decorators:
wrapper = tf_decorator.rewrap(f_wrapper, f, wrapper)
setattr(wrapper, '__ag_compiled', True)
return wrapper
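# --- Hypothetical usage sketch (not part of the original TensorFlow module) ---
# Shows how an internal higher-order API might wrap a user-supplied callback with
# `tf_convert` so the callback respects the ambient AutoGraph context. The helper
# name below and the directly constructed `ControlStatusCtx` are illustrative
# assumptions only.
def _tf_convert_usage_sketch(user_fn, *args, **kwargs):
  ctx = ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED)
  wrapped = tf_convert(user_fn, ctx, convert_by_default=False)
  return wrapped(*args, **kwargs)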
# TODO(mdan): Make private.
def convert(recursive=False, optional_features=None, force_conversion=True):
"""Decorator that compiles a function to use TensorFlow ops.
The decorator is dynamic - it recompiles the target whenever the decorated
function is called. This means the parameter values are known at conversion.
It also means that repeated calls with different types of parameters will be
correctly processed.
Args:
recursive: bool, whether to recursively convert any functions or classes
that the converted function may use.
optional_features: converted.Feature, allows toggling optional or
experimental features. When set to None, only the core features are
enabled.
force_conversion: bool, whether to ignore the conversion whitelist. See
ConversionOptions.force_conversion.
Returns:
Callable, a decorator that converts the given function into an equivalent
function that uses TensorFlow ops.
"""
def decorator(f):
"""Decorator implementation."""
def wrapper(*args, **kwargs):
"""Wrapper that calls the converted version of f."""
with ag_ctx.ControlStatusCtx(
status=ag_ctx.Status.ENABLED, options=optional_features):
try:
return converted_call(
f,
converter.ConversionOptions(
recursive=recursive,
force_conversion=force_conversion,
optional_features=optional_features,
), args, kwargs)
except Exception as e: # pylint:disable=broad-except
if hasattr(e, 'ag_error_metadata'):
raise e.ag_error_metadata.to_exception(type(e))
else:
raise
if inspect.isfunction(f) or inspect.ismethod(f):
wrapper = functools.update_wrapper(wrapper, f)
decorated_wrapper = tf_decorator.make_decorator(f, wrapper)
# Sometimes the decorator is just desugared, making it impossible to detect.
# This attribute makes detection easier.
setattr(decorated_wrapper, '__ag_compiled', True)
return decorated_wrapper
return decorator
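# --- Hypothetical usage sketch (not part of the original TensorFlow module) ---
# Applies `convert` as a decorator to a plain Python function with data-dependent
# control flow; the function name is illustrative only.
def _convert_usage_sketch():
  @convert(recursive=True)
  def square_if_positive(x):
    if x > 0:
      return x * x
    return -x
  return square_if_positive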
class RunMode(Enum):
"""Specifies the way a converted function or method should be executed in TF.
Attributes:
* GRAPH: Call this function directly, as-is. This is suitable for functions
that were already designed for TF graphs and contain ops.
* PY_FUNC: Wrap this function into a py_func op. This is suitable for code
that will only run correctly in Python, for example code that renders to
the display, reads keyboard input, etc.
"""
GRAPH = 1
PY_FUNC = 2
def call_with_unspecified_conversion_status(func):
"""Decorator that resets the conversion context to the unspecified status."""
def wrapper(*args, **kwargs):
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):
return func(*args, **kwargs)
if inspect.isfunction(func) or inspect.ismethod(func):
wrapper = functools.update_wrapper(wrapper, func)
setattr(wrapper, '__ag_compiled', True)
return wrapper
def do_not_convert_internal(f):
"""Decorator that marks internal functions which do not need conversion."""
setattr(f, '__ag_compiled', True)
return f
@tf_export('autograph.experimental.do_not_convert')
def do_not_convert(func=None, run_as=RunMode.GRAPH, return_dtypes=None):
"""Decorator that suppresses the conversion of a function.
See also: docs/pyfunc_dtypes.md
Args:
func: function to decorate.
run_as: RunMode, specifies how to use the function in TensorFlow.
return_dtypes: Optional[Iterable[ Union[tf.DType,
utils.py_func.MatchDType]]], the return data types of the converted
function, if run_as is RunMode.PY_FUNC. Ignored otherwise. May be set to
None if the function has no return values.
Returns:
If `func` is not None, returns a `Callable` which is equivalent to
`func`, but is not converted by AutoGraph.
If `func` is None, returns a decorator that, when invoked with a
single `func` argument, returns a `Callable` equivalent to the
above case.
"""
if func is None:
return functools.partial(
do_not_convert,
run_as=run_as,
return_dtypes=return_dtypes)
def graph_wrapper(*args, **kwargs):
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
return func(*args, **kwargs)
def py_func_wrapper(*args, **kwargs):
if kwargs:
raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
# TODO(mdan): Add support for kwargs.
return py_func.wrap_py_func(
func, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
if run_as == RunMode.GRAPH:
wrapper = graph_wrapper
elif run_as == RunMode.PY_FUNC:
wrapper = py_func_wrapper
else:
raise ValueError('unknown value for run_as: %s' % run_as)
if inspect.isfunction(func) or inspect.ismethod(func):
wrapper = functools.update_wrapper(wrapper, func)
setattr(wrapper, '__ag_compiled', True)
return wrapper
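# --- Hypothetical usage sketch (not part of the original TensorFlow module) ---
# Marks a Python-only helper so AutoGraph leaves it untransformed; with the
# default RunMode.GRAPH the helper is simply called as-is. Names are
# illustrative only.
def _do_not_convert_usage_sketch():
  @do_not_convert
  def log_value(x):
    print('value:', x)
  return log_value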
def _attach_metadata(e, f, converted):
"""Augments an error with the metadata necessary for rewrite."""
if hasattr(e, 'ag_pass_through'):
return
metadata = getattr(e, 'ag_error_metadata', None)
source_map = f.ag_source_map if converted else {}
if metadata is None:
logging.log(
1, 'Caught error in %s (converted=%s)', f, converted, exc_info=True)
message = '{}: {}'.format(e.__class__.__name__, e)
else:
message = None
cause_tb = traceback.extract_tb(sys.exc_info()[2])[1:]
e.ag_error_metadata = _ErrorMetadata(cause_tb, metadata, message, source_map)
def _call_unconverted(f, args, kwargs, options, update_cache=True):
"""Calls the original function without converting with AutoGraph."""
if update_cache:
conversion.cache_unconverted(f, options)
if inspect_utils.istfmethodtarget(f):
return f.__self__.call(args, kwargs)
try:
if kwargs is not None:
return f(*args, **kwargs)
else:
return f(*args)
except Exception as e: # pylint:disable=broad-except
_attach_metadata(e, f, False)
raise
def _is_known_loaded_type(f, module_name, entity_name):
"""Tests whether the function or method is an instance of a known type."""
if (module_name not in sys.modules or
not hasattr(sys.modules[module_name], entity_name)):
return False
type_entity = getattr(sys.modules[module_name], entity_name)
if isinstance(f, type_entity):
    # The method is of this type. Example:
#
# o = ClassType()
# function(o.method)()
return True
# Note: inspect is required here, to avoid unpacking tf.function decorators.
if inspect.ismethod(f):
    # The unbound method is of this type. Example:
#
# class ClassType:
# @function
# def method(self):
# ...
# o = ClassType()
# o.method()
if isinstance(f.__func__, type_entity):
return True
return False
def converted_call(f, options, args, kwargs):
"""Compiles a function call inline. For internal use only."""
logging.log(1, 'Converted call: %s\n args: %s\n kwargs: %s\n', f, args,
kwargs)
if conversion.check_cached_unconverted(f, options):
return _call_unconverted(f, args, kwargs, options, False)
if inspect_utils.isbuiltin(f):
if f is eval:
return py_builtins.eval_in_original_context(f, args, 1)
if f is super:
return py_builtins.super_in_original_context(f, args, 1)
if kwargs:
return py_builtins.overload_of(f)(*args, **kwargs)
else:
return py_builtins.overload_of(f)(*args)
# TODO(mdan): Clean up the naming inconsistency.
if hasattr(f, 'autograph_info__') or hasattr(f, '__ag_compiled'):
logging.log(2, 'Permanently whitelisted: %s: already converted', f)
return _call_unconverted(f, args, kwargs, options)
# TODO(b/122265385): Remove this bypass.
if (_is_known_loaded_type(f, 'wrapt', 'FunctionWrapper') or
_is_known_loaded_type(f, 'wrapt', 'BoundFunctionWrapper')):
logging.warn(
'Entity {} appears to be decorated by wrapt, which is not yet supported'
' by AutoGraph. The function will be called without transformation.'
' You may however apply AutoGraph before the decorator.'.format(f))
logging.log(2, 'Permanently whitelisted: %s: wrapt decorated', f)
return _call_unconverted(f, args, kwargs, options)
if _is_known_loaded_type(f, 'functools', '_lru_cache_wrapper'):
logging.log(2, 'Permanently whitelisted: %s: lru_cache', f)
return _call_unconverted(f, args, kwargs, options)
# Constructors are permanently whitelisted.
# TODO(mdan): Toggle as experimental feature instead.
# TODO(b/124016764): Remove this limitation.
if tf_inspect.isclass(f):
logging.log(2, 'Permanently whitelisted: %s: constructor', f)
return _call_unconverted(f, args, kwargs, options)
# Other built-in modules are permanently whitelisted.
# TODO(mdan): Figure out how to do this consistently for all stdlib modules.
if any(
f in m.__dict__.values() for m in (collections, pdb, copy, inspect, re)):
logging.log(2, 'Permanently whitelisted: %s: part of builtin module', f)
return _call_unconverted(f, args, kwargs, options)
# Custom ops and kernels are also permanently whitelisted.
# See tensorflow.framework.load_library.
if (hasattr(f, '__module__') and
hasattr(f.__module__, '_IS_TENSORFLOW_PLUGIN')):
logging.log(2, 'Permanently whitelisted: %s: TensorFlow plugin', f)
return _call_unconverted(f, args, kwargs, options)
if not options.force_conversion and conversion.is_whitelisted_for_graph(f):
return _call_unconverted(f, args, kwargs, options)
# internal_convert_user_code is for example turned off when issuing a dynamic
# call conversion from generated code while in nonrecursive mode. In that
# case we evidently don't want to recurse, but we still have to convert
# things like builtins.
if not options.internal_convert_user_code:
return _call_unconverted(f, args, kwargs, options)
# TODO(mdan): Move this entire block inside to_graph.
try: # Begin of transformation error guards
# Unwrap functools.partial objects
# TODO(mdan): Consider sharing unwrapping logic with tf_inspect.
# TODO(b/120224672): This unwrapping should be done before the checks above.
while isinstance(f, functools.partial):
args = f.args + args
new_kwargs = {}
if f.keywords is not None:
new_kwargs.update(f.keywords)
if kwargs is not None:
new_kwargs.update(kwargs)
kwargs = new_kwargs
f = f.func
if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
# Regular functions
target_entity = f
f_self = inspect_utils.getmethodself(f)
# TODO(b/119246461): This may be more elegantly handled using __get__?
if f_self is not None:
effective_args = (f_self,) + args
else:
effective_args = args
elif hasattr(f, '__call__') and hasattr(f, '__class__'):
# Callable objects
target_entity = f.__call__
effective_args = (f,) + args
elif tf_inspect.isclass(f):
# Constructors
      # Note: Until we support class constructors, and enable whole-class
# conversion with an experimental flag, this branch is dead code.
# TODO(mdan): Consider removing unless there is a compelling use case.
target_entity = f
effective_args = args
else:
target_entity = f
raise NotImplementedError('unknown callable type "%s"' % type(f))
if not tf_inspect.isclass(target_entity):
if not hasattr(target_entity, '__code__'):
logging.log(2, 'Permanently whitelisted: %s: native binding',
target_entity)
return _call_unconverted(f, args, kwargs, options)
elif (hasattr(target_entity.__code__, 'co_filename') and
target_entity.__code__.co_filename == '<string>'):
# TODO(mdan): __globals__['txt'] might work in Py3.
logging.log(2, 'Permanently whitelisted: %s: dynamic code (exec?)',
target_entity)
return _call_unconverted(f, args, kwargs, options)
converted_f = to_graph(
target_entity,
recursive=options.recursive,
experimental_optional_features=options.optional_features)
if logging.has_verbosity(2):
logging.log(2, 'Defaults of %s : %s', converted_f,
converted_f.__defaults__)
if six.PY3:
logging.log(2, 'KW defaults of %s : %s',
converted_f, converted_f.__kwdefaults__)
if kwargs is not None:
callargs = tf_inspect.getcallargs(converted_f, *effective_args,
**kwargs)
else:
callargs = tf_inspect.getcallargs(converted_f, *effective_args)
formatted_callargs = '\n'.join(
' {}: {}'.format(k, v) for k, v in callargs.items())
logging.log(2, 'Calling %s with\n%s\n', converted_f, formatted_callargs)
except Exception as e: # pylint:disable=broad-except
logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
if is_autograph_strict_conversion_mode():
raise
logging.warn(
'Entity %s could not be transformed and will be executed as-is.'
' Please report this to the AutoGraph team. When filing the bug, set'
' the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and'
' attach the full output. Cause: %s', target_entity, e)
return _call_unconverted(f, args, kwargs, options)
with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
try:
if kwargs is not None:
result = converted_f(*effective_args, **kwargs)
else:
result = converted_f(*effective_args)
except Exception as e:
_attach_metadata(e, converted_f, True)
raise
return result
@tf_export('autograph.to_graph', v1=[])
def to_graph(entity, recursive=True, experimental_optional_features=None):
"""Converts a Python entity into a TensorFlow graph.
Also see: `tf.autograph.to_code`, `tf.function`.
Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
Python code to TensorFlow graph code. It does not implement any caching,
variable management or create any actual ops, and is best used where greater
control over the generated TensorFlow graph is desired. Another difference
from `tf.function` is that `to_graph` will not wrap the graph into a
TensorFlow function or a Python callable. Internally, `tf.function` uses
`to_graph`.
_Example Usage_
```python
def foo(x):
if x > 0:
y = x * x
else:
y = -x
return y
converted_foo = to_graph(foo)
x = tf.constant(1)
y = converted_foo(x) # converted_foo is a TensorFlow Op-like.
assert is_tensor(y)
```
Supported Python entities include:
* functions
* classes
* object methods
Functions are converted into new functions with converted code.
Classes are converted by generating a new class whose methods use converted
code.
  Methods are converted into unbound functions that have an additional first
argument called `self`.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
Same as `entity`, the converted Python function or class.
Raises:
ValueError: If the entity could not be converted.
"""
try:
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(
recursive=recursive,
optional_features=experimental_optional_features),
autograph_module=tf_inspect.getmodule(to_graph))
return conversion.convert(entity, program_ctx)
except (ValueError, AttributeError, KeyError, NameError, AssertionError) as e:
logging.error(1, 'Error converting %s', entity, exc_info=True)
raise ConversionError('converting {}: {}: {}'.format(
entity, e.__class__.__name__, str(e)))
@tf_export(v1=['autograph.to_graph'])
def to_graph_v1(entity,
recursive=True,
arg_values=None,
arg_types=None,
experimental_optional_features=None):
"""Converts a Python entity into a TensorFlow graph.
Also see: `tf.autograph.to_code`, `tf.function`.
Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
Python code to TensorFlow graph code. It does not implement any caching,
variable management or create any actual ops, and is best used where greater
control over the generated TensorFlow graph is desired. Another difference
from `tf.function` is that `to_graph` will not wrap the graph into a
TensorFlow function or a Python callable. Internally, `tf.function` uses
`to_graph`.
_Example Usage_
```python
def foo(x):
if x > 0:
y = x * x
else:
y = -x
return y
converted_foo = to_graph(foo)
x = tf.constant(1)
y = converted_foo(x) # converted_foo is a TensorFlow Op-like.
assert is_tensor(y)
```
Supported Python entities include:
* functions
* classes
* object methods
Functions are converted into new functions with converted code.
Classes are converted by generating a new class whose methods use converted
code.
  Methods are converted into unbound functions that have an additional first
argument called `self`.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
arg_values: Deprecated.
arg_types: Deprecated.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
Same as `entity`, the converted Python function or class.
Raises:
ValueError: If the entity could not be converted.
"""
del arg_types
del arg_values
return to_graph(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features)
@tf_export(v1=['autograph.to_code'])
def to_code_v1(entity,
recursive=True,
arg_values=None,
arg_types=None,
indentation=' ',
experimental_optional_features=None):
"""Similar to `to_graph`, but returns Python source code as a string.
Also see: `tf.autograph.to_graph`.
`to_graph` returns the Python source code that can be used to generate a
TensorFlow graph that is functionally identical to the input Python code.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
arg_values: Deprecated.
arg_types: Deprecated.
indentation: Deprecated.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
The converted code as string.
"""
del arg_values
del arg_types
del indentation
return to_code(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features)
@tf_export('autograph.to_code', v1=[])
def to_code(entity, recursive=True, experimental_optional_features=None):
"""Similar to `to_graph`, but returns Python source code as a string.
Also see: `tf.autograph.to_graph`.
`to_graph` returns the Python source code that can be used to generate a
TensorFlow graph that is functionally identical to the input Python code.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
The converted code as string.
"""
source = tf_inspect.getsource(
to_graph(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features))
return textwrap.dedent(source)
|
the-stack_106_30268 | # coding: utf-8
from django.conf.urls import url
from bootcamp.feeds import views
urlpatterns = [
url(r'^$', views.feeds, name='feeds'),
url(r'^post/$', views.post, name='post'),
url(r'^like/$', views.like, name='like'),
url(r'^comment/$', views.comment, name='comment'),
url(r'^load/$', views.load, name='load'),
url(r'^check/$', views.check, name='check'),
url(r'^load_new/$', views.load_new, name='load_new'),
url(r'^update/$', views.update, name='update'),
url(r'^track_comments/$', views.track_comments, name='track_comments'),
url(r'^remove/$', views.remove, name='remove_feed'),
url(r'^(\d+)/$', views.feed, name='feed'),
]
|
the-stack_106_30270 | # Modifications copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original copyright 2021 Megvii, Base Detection
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbone for YOLOPAFPN.
Modifications include:
- Removed unused Darknet class
- Removed unused DWConv, ResLayer import
- Removed depthwise and act arguments
- Refactor and formatting
"""
from typing import Dict, Tuple
import torch
import torch.nn as nn
from peekingduck.pipeline.nodes.model.yoloxv1.yolox_files.network_blocks import (
BaseConv,
CSPLayer,
Focus,
SPPBottleneck,
)
class CSPDarknet(nn.Module):
"""Modified CSPNet with SiLU activation.
Args:
dep_mul (float): Depth multiplier, used to determine the number of
Bottlenecks in `CSPLayer`.
wid_mul (float): Width multiplier, used to determine the number of
`in_channels` and `out_channels` in `BaseConv`.
out_features (Tuple[str, str, str]): Selects the desired outputs in
`forward()`. `YOLOPAFPN` creates `CSPDarknet` with
`out_features = ("dark3", "dark4", "dark5")`.
"""
# pylint: disable=arguments-differ
def __init__(
self,
dep_mul: float,
wid_mul: float,
out_features: Tuple[str, str, str],
) -> None:
super().__init__()
self.out_features = out_features
channels = int(wid_mul * 64) # 64
depth = max(round(dep_mul * 3), 1) # 3
self.stem = Focus(3, channels, 3)
self.dark2 = nn.Sequential(*self.make_group_layer(channels, depth))
self.dark3 = nn.Sequential(*self.make_group_layer(channels * 2, depth * 3))
self.dark4 = nn.Sequential(*self.make_group_layer(channels * 4, depth * 3))
self.dark5 = nn.Sequential(
BaseConv(channels * 8, channels * 16, 3, 2),
SPPBottleneck(channels * 16, channels * 16),
CSPLayer(channels * 16, channels * 16, depth, False),
)
@staticmethod
def make_group_layer(in_channels: int, depth: int) -> Tuple[BaseConv, CSPLayer]:
"""Starts with BaseConv layer, followed by a CSPLayer.
Args:
in_channels (int): Number of channels in the input image.
depth (int): Number of Bottlenecks.
Returns:
(Tuple[BaseConv, CSPLayer]): A group layer consisting of a BaseConv
CSPLayer.
"""
return (
BaseConv(in_channels, in_channels * 2, 3, 2),
CSPLayer(in_channels * 2, in_channels * 2, depth),
)
def forward(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]:
"""Defines the computation performed at every call.
Args:
inputs (torch.Tensor): Input from the previous layer.
Returns:
(Dict[str, torch.Tensor]): A dictionary of tensors with keys
corresponding to `self.out_features`.
"""
outputs = {}
inputs = self.stem(inputs)
outputs["stem"] = inputs
inputs = self.dark2(inputs)
outputs["dark2"] = inputs
inputs = self.dark3(inputs)
outputs["dark3"] = inputs
inputs = self.dark4(inputs)
outputs["dark4"] = inputs
inputs = self.dark5(inputs)
outputs["dark5"] = inputs
return {k: v for k, v in outputs.items() if k in self.out_features}
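# --- Hypothetical usage sketch (not part of the original module) ---
# Builds a small CSPDarknet and runs a dummy batch to show the dictionary of
# feature maps returned by forward(). The multipliers and input size below are
# illustrative assumptions, not PeekingDuck defaults.
def _csp_darknet_usage_sketch() -> Dict[str, torch.Size]:
    backbone = CSPDarknet(
        dep_mul=0.33, wid_mul=0.5, out_features=("dark3", "dark4", "dark5")
    )
    dummy = torch.zeros(1, 3, 256, 256)
    features = backbone(dummy)
    return {name: tensor.shape for name, tensor in features.items()}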
|
the-stack_106_30272 | """
Gets concordance for keywords and groups by word.
"""
from defoe import query_utils
from defoe.alto.query_utils import get_page_matches
def do_query(archives, config_file=None, logger=None, context=None):
"""
Gets concordance for keywords and groups by word.
config_file must be the path to a configuration file with a list
of the keywords to search for, one per line.
Both keywords and words in documents are normalized, by removing
all non-'a-z|A-Z' characters.
Returns result of form:
{
<WORD>:
[
{ "title": <TITLE>,
"place": <PLACE>,
"publisher": <PUBLISHER>,
"page_number": <PAGE_NUMBER>,
"content": <PAGE_CONTENT>,
"year": <YEAR>,
"document_id": <DOCUMENT_ID>,
"filename": <FILENAME>
},
...
],
<WORD>:
...
}
:param archives: RDD of defoe.alto.archive.Archive
:type archives: pyspark.rdd.PipelinedRDD
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
:return: information on documents in which keywords occur grouped
by word
:rtype: dict
"""
keywords = []
with open(config_file, "r") as f:
keywords = [query_utils.normalize(word) for word in list(f)]
# [document, ...]
documents = archives.flatMap(
lambda archive: [document for document in list(archive)])
# [(year, document, page, word), ...]
filtered_words = documents.flatMap(
lambda document: get_page_matches(document,
keywords))
# [(year, document, page, word), ...]
# =>
# [(word, {"title": title, ...}), ...]
matching_docs = filtered_words.map(
lambda year_document_page_word:
(year_document_page_word[3],
{"title": year_document_page_word[1].title,
"place": year_document_page_word[1].place,
"publisher": year_document_page_word[1].publisher,
"page_number": year_document_page_word[2].code,
"content": year_document_page_word[2].content,
"year": year_document_page_word[0],
"document_id": year_document_page_word[1].code,
"filename": year_document_page_word[1].archive.filename}))
# [(word, {"title": title, ...}), ...]
# =>
# [(word, [{"title": title, ...], {...}), ...)]
result = matching_docs \
.groupByKey() \
.map(lambda year_context:
(year_context[0], list(year_context[1]))) \
.collect()
return result
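# --- Illustrative sketch (not part of the original query) ---
# The groupByKey/map chain above is equivalent to the plain-Python grouping
# below; it is included only to clarify the shape of the returned structure.
# The sample records are made up.
def _group_matches_sketch():
    from collections import defaultdict
    matches = [
        ("liberty", {"title": "A", "year": 1830}),
        ("liberty", {"title": "B", "year": 1832}),
    ]
    grouped = defaultdict(list)
    for word, info in matches:
        grouped[word].append(info)
    return [(word, infos) for word, infos in grouped.items()]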
|
the-stack_106_30273 | import numpy as np
import cv2
import tensorflow as tf
import os
import webbrowser
url = 'http://localhost:8080/index.html'
chrome_path = '/usr/bin/google-chrome %s'
data_dir = "dataset"
labels = next(os.walk(data_dir))[1]
labels.sort()
img_size = 256
cap = cv2.VideoCapture(0)
while(True):
ret, frame = cap.read() # Read video stream
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # convert BGR color to RGB
cv2.imshow('frame',img)
if cv2.waitKey(1) & 0xFF == ord('p'): # On click of 'p' run prediction
new_array = cv2.resize(img, (img_size, img_size))
new_array = new_array.reshape(-1, img_size, img_size, 3)
model = tf.keras.models.load_model("model.h5") # Load model
cv2.imwrite("./app_data/test.jpg",new_array)
prediction = model.predict(np.array(new_array)/255)
pred_label = labels[int(np.where(prediction == np.amax(prediction))[0])]
webbrowser.get(chrome_path).open(url+"?name="+pred_label) # Open up webpage for addtional details
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() |
the-stack_106_30274 | from __future__ import absolute_import
from __future__ import print_function
import argparse
import numpy as np
import time
from models.simple import SimpleModel
from models.contrastive import ContrastiveModel
"""
Basic deep neural network that works with SVM input
"""
parser = argparse.ArgumentParser(description='Basic deep neural network that works with SVM input')
parser.add_argument('--train', default='', required=True, help='Location of the training set', metavar='#')
parser.add_argument('--output', default='model', help='Save model to', metavar='#')
parser.add_argument('--max_epochs', default=100, help='Maximum amount of epochs', metavar='#')
parser.add_argument('--layers', default=5, help='Amount of hidden layers', metavar='#')
parser.add_argument('--units', default=256, help='Amount of hidden units per layer', metavar='#')
parser.add_argument('--batch_size', default=256, help='Amount of samples in a batch', metavar='#')
parser.add_argument('--contrastive', default=False, action='store_true', help='Use contrastive model for training')
args = parser.parse_args()
args.batch_size = int(args.batch_size)
args.units = int(args.units)
args.layers = int(args.layers)
args.max_epochs = int(args.max_epochs)
print("Initialized with settings:")
print(vars(args))
# Restoring embeddings and preparing reader to produce batches
print("Initializing training set reader")
SelectedModel = SimpleModel
if args.contrastive:
SelectedModel = ContrastiveModel
batch_producer = SelectedModel.get_producer(args.train)
print("Test batch:")
batch, labels = batch_producer.produce(2)
for idx, _ in enumerate(labels):
print(" Features: ", batch[idx])
print(" Label: ", labels[idx])
print("")
model = SelectedModel("Model", batch_producer.max_index, len(batch_producer.labels))
model.units(args.units).layers(args.layers).batch_size(args.batch_size).max_epochs(args.max_epochs)
model.train(batch_producer)
model.save_to_file(args.output)
print("Done")
|
the-stack_106_30277 | #!/usr/bin/env python3
import sys
import struct
import os
from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr
from scapy.all import Packet, IPOption
from scapy.all import ShortField, IntField, LongField, BitField, FieldListField, FieldLenField
from scapy.all import IP, TCP, UDP, Raw
from scapy.layers.inet import _IPOption_HDR
def get_if():
ifs=get_if_list()
iface=None
for i in get_if_list():
if "eth0" in i:
iface=i
            break
if not iface:
print("Cannot find eth0 interface")
exit(1)
return iface
class IPOption_MRI(IPOption):
name = "MRI"
option = 31
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="swids",
adjust=lambda pkt,l:l+4),
ShortField("count", 0),
FieldListField("swids",
[],
IntField("", 0),
length_from=lambda pkt:pkt.count*4) ]
def handle_pkt(pkt):
if TCP in pkt and pkt[TCP].dport == 1234:
print("got a packet")
pkt.show2()
# hexdump(pkt)
sys.stdout.flush()
def main():
ifaces = [i for i in os.listdir('/sys/class/net/') if 'eth' in i]
iface = ifaces[0]
print(("sniffing on %s" % iface))
sys.stdout.flush()
sniff(iface = iface,
prn = lambda x: handle_pkt(x))
if __name__ == '__main__':
main()
|
the-stack_106_30279 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2017-2020 Airinnova AB and the Airfoils authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
# Authors:
# * Aaron Dettmann
"""
Provides tools to create and modify airfoil objects
Developed for Airinnova AB, Stockholm, Sweden.
"""
from datetime import datetime
import os
import re
import numpy as np
from scipy.interpolate import interp1d
from scipy.misc import derivative
import matplotlib.pyplot as plt
POINTS_AIRFOIL = 200
class NACADefintionError(Exception):
"""Raised when the NACA identifier number is not valid"""
pass
class Airfoil:
def __init__(self, upper, lower):
"""
Main constructor method
Args:
:upper: 2 x N array with x- and y-coordinates of the upper side
:lower: 2 x N array with x- and y-coordinates of the lower side
Note:
* During initialisation data points are automatically ordered
and normalised if necessary.
"""
# Always use Numpy arrays
upper = np.array(upper, dtype=float)
lower = np.array(lower, dtype=float)
# Unpack coordinates
self._x_upper, self._y_upper = upper
self._x_lower, self._y_lower = lower
# Process coordinates
self.norm_factor = 1
self._order_data_points()
self._normalise_data_points()
# Remove duplicate points from coordinate vectors. x-values must be
# unique. Values passed to iterp1d() must be monotonically increasing.
self._x_upper, idx_keep = np.unique(self._x_upper, return_index=True)
self._y_upper = self._y_upper[idx_keep]
self._x_lower, idx_keep = np.unique(self._x_lower, return_index=True)
self._y_lower = self._y_lower[idx_keep]
# Make interpolation functions for 'y_upper' and 'y_lower'
self._y_upper_interp = interp1d(
self._x_upper,
self._y_upper,
kind='cubic',
bounds_error=False,
fill_value="extrapolate"
)
self._y_lower_interp = interp1d(
self._x_lower,
self._y_lower,
kind='cubic',
bounds_error=False,
fill_value="extrapolate"
)
def __str__(self):
return self.__class__.__name__ + "(upper, lower)"
def __repr__(self):
return self.__class__.__name__ + "(upper, lower)"
def y_upper(self, x):
return self._y_upper_interp(x)
def y_lower(self, x):
return self._y_lower_interp(x)
@classmethod
def NACA4(cls, naca_digits, n_points=POINTS_AIRFOIL):
"""
Create an airfoil object from a NACA 4-digit series definition
Note:
* This is an alternative constructor method
Args:
:naca_digits: String like '4412'
:points: Total number of points used to create the airfoil
Returns:
:airfoil: New airfoil instance
"""
re_4digits = re.compile(r"^\d{4}$")
if re_4digits.match(naca_digits):
p = float(naca_digits[0])/10
m = float(naca_digits[1])/100
xx = float(naca_digits[2:4])/100
else:
raise NACADefintionError("Identifier not recognised as valid NACA 4 definition")
upper, lower = gen_NACA4_airfoil(p, m, xx, n_points)
return cls(upper, lower)
@classmethod
def morph_new_from_two_foils(cls, airfoil1, airfoil2, eta, n_points):
"""
Create an airfoil object from a linear interpolation between two
airfoil objects
Note:
* This is an alternative constructor method
Args:
:airfoil1: Airfoil object at eta = 0
:airfoil2: Airfoil object at eta = 1
:eta: Relative position where eta = [0, 1]
:n_points: Number of points for new airfoil object
Returns:
:airfoil: New airfoil instance
"""
if not 0 <= eta <= 1:
raise ValueError(f"'eta' must be in range [0,1], given eta is {float(eta):.3f}")
x = np.linspace(0, 1, n_points)
y_upper_af1 = airfoil1.y_upper(x)
y_lower_af1 = airfoil1.y_lower(x)
y_upper_af2 = airfoil2.y_upper(x)
y_lower_af2 = airfoil2.y_lower(x)
y_upper_new = y_upper_af1*(1 - eta) + y_upper_af2*eta
y_lower_new = y_lower_af1*(1 - eta) + y_lower_af2*eta
upper = np.array([x, y_upper_new])
lower = np.array([x, y_lower_new])
return cls(upper, lower)
@property
def all_points(self):
"""
Returns a single 2 x N array with x and y-coordinates in separate columns
"""
all_points = np.array([
np.concatenate((self._x_upper, self._x_lower)),
np.concatenate((self._y_upper, self._y_lower))
])
return all_points
def _order_data_points(self):
"""
Order the data points so that x-coordinate starts at 0
"""
if self._x_upper[0] > self._x_upper[-1]:
self._x_upper = np.flipud(self._x_upper)
self._y_upper = np.flipud(self._y_upper)
if self._x_lower[0] > self._x_lower[-1]:
self._x_lower = np.flipud(self._x_lower)
self._y_lower = np.flipud(self._y_lower)
def _normalise_data_points(self):
"""
Normalise data points so that x ranges from 0 to 1
"""
self.norm_factor = abs(self._x_upper[-1] - self._x_upper[0])
self._x_upper /= self.norm_factor
self._y_upper /= self.norm_factor
self._x_lower /= self.norm_factor
self._y_lower /= self.norm_factor
def plot(self, *, show=True, save=False, settings={}):
"""
Plot the airfoil and camber line
Note:
* 'show' and/or 'save' must be True
Args:
:show: (bool) Create an interactive plot
:save: (bool) Save plot to file
:settings: (bool) Plot settings
Plot settings:
* Plot settings must be a dictionary
* Allowed keys:
'points': (bool) ==> Plot coordinate points
'camber': (bool) ==> Plot camber
'chord': (bool) ==> Plot chord
'path': (str) ==> Output path (directory path, must exists)
'file_name': (str) ==> Full file name
Returns:
None or 'file_name' (full path) if 'save' is True
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim([0, 1])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.axis('equal')
ax.grid()
ax.plot(self._x_upper, self._y_upper, '-', color='blue')
ax.plot(self._x_lower, self._y_lower, '-', color='green')
if settings.get('points', False):
ax.plot(self.all_points[0, :], self.all_points[1, :], '.', color='grey')
if settings.get('camber', False):
x = np.linspace(0, 1, int(POINTS_AIRFOIL/2))
ax.plot(x, self.camber_line(x), '--', color='red')
if settings.get('chord', False):
pass
plt.subplots_adjust(left=0.10, bottom=0.10, right=0.98, top=0.98, wspace=None, hspace=None)
if show:
plt.show()
if save:
path = settings.get('path', '.')
file_name = settings.get('file_name', False)
if not file_name:
now = datetime.strftime(datetime.now(), format='%F_%H%M%S')
file_type = 'png'
file_name = f'airfoils_{now}.{file_type}'
fig.savefig(os.path.join(path, file_name))
return file_name
def camber_line(self, x):
"""
Compute the camber line
Method 1: y_camber = (y_upper + y_lower)/2
Args:
:x: Relative chordwise coordinate ranging from 0 to 1
Returns:
:camber_line: y-coordinates at given x positions
"""
return (self.y_upper(x) + self.y_lower(x))/2
def camber_line_angle(self, x):
"""
Compute the camber line angle
Args:
:x: Relative chordwise coordinate ranging from 0 to 1
Returns:
:theta: Camber line angle at given x positions
"""
########################
x = np.asarray(x)
scalar_input = False
if x.ndim == 0:
x = x[None] # Make 1D array
scalar_input = True
########################
dydx = derivative(self.camber_line, x, dx=1e-12)
theta = np.rad2deg(np.arctan(dydx))
theta = np.array([0 if abs(x) > 50 else x for x in theta])
########################
if scalar_input:
return np.squeeze(theta)
return theta
########################
class MorphAirfoil:
def __init__(self, airfoil1, airfoil2, n_points=POINTS_AIRFOIL):
"""
Wrapper class that returns a morphed airfoil at specified eta position
Attributes:
:airfoil1: Airfoil object at eta = 0
:airfoil2: Airfoil object at eta = 1
:n_points: Number of points for new airfoil object
"""
self.airfoil1 = airfoil1
self.airfoil2 = airfoil2
self.n_points = n_points
def at_eta(self, eta):
"""
Returns a new airfoil object at a given eta position
Args:
:eta: (float) eta position where eta = [0, 1]
Returns:
:morphed_airfoil: (obj) interpolated airfoil object at the given eta position
"""
return Airfoil.morph_new_from_two_foils(
self.airfoil1,
self.airfoil2,
eta=eta,
n_points=self.n_points
)
def gen_NACA4_airfoil(p, m, xx, n_points):
"""
Generate upper and lower points for a NACA 4 airfoil
Args:
:p:
:m:
:xx:
:n_points:
Returns:
:upper: 2 x N array with x- and y-coordinates of the upper side
:lower: 2 x N array with x- and y-coordinates of the lower side
"""
def yt(xx, xsi):
# Thickness distribution
a0 = 1.4845
a1 = 0.6300
a2 = 1.7580
a3 = 1.4215
a4 = 0.5075
return xx*(a0*np.sqrt(xsi) - a1*xsi - a2*xsi**2 + a3*xsi**3 - a4*xsi**4)
def yc(p, m, xsi):
# Camber line
def yc_xsi_lt_p(xsi):
return (m/p**2)*(2*p*xsi - xsi**2)
def dyc_xsi_lt_p(xsi):
return (2*m/p**2)*(p - xsi)
def yc_xsi_ge_p(xsi):
return (m/(1 - p)**2)*(1 - 2*p + 2*p*xsi - xsi**2)
def dyc_xsi_ge_p(xsi):
return (2*m/(1 - p)**2)*(p - xsi)
yc = np.array([yc_xsi_lt_p(x) if x < p else yc_xsi_ge_p(x) for x in xsi])
dyc = np.array([dyc_xsi_lt_p(x) if x < p else dyc_xsi_ge_p(x) for x in xsi])
return yc, dyc
xsi = np.linspace(0, 1, n_points)
yt = yt(xx, xsi)
yc, dyc = yc(p, m, xsi)
theta = np.arctan(dyc)
x_upper = xsi - yt*np.sin(theta)
y_upper = yc + yt*np.cos(theta)
x_lower = xsi + yt*np.sin(theta)
y_lower = yc - yt*np.cos(theta)
upper = np.array([x_upper, y_upper])
lower = np.array([x_lower, y_lower])
return upper, lower
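# --- Hypothetical usage sketch (not part of the original module) ---
# Builds two NACA 4-digit airfoils, samples the camber line of one, and morphs
# between them; the identifiers and eta value are illustrative only.
def _airfoil_usage_sketch():
    foil_a = Airfoil.NACA4('4412')
    foil_b = Airfoil.NACA4('0012')
    camber_mid = foil_a.camber_line(0.5)
    blended = Airfoil.morph_new_from_two_foils(
        foil_a, foil_b, eta=0.3, n_points=POINTS_AIRFOIL)
    return camber_mid, blended.y_upper(0.25)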
|
the-stack_106_30280 | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit CircuitPython DotStar Library"
copyright = "2017 Scott Shawcroft, Limor Fried & Damien P. George"
author = "Scott Shawcroft, Limor Fried & Damien P. George"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitDotStarLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitDotStarLibrary.tex",
"Adafruit DotStar Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"adafruitDotStarlibrary",
"Adafruit DotStar Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitDotStarLibrary",
"Adafruit DotStar Library Documentation",
author,
"AdafruitDotStarLibrary",
"One line description of project.",
"Miscellaneous",
),
]
|
the-stack_106_30282 | from collections import OrderedDict
from typing import Any, Generic, Iterable, Mapping, Type, TypeVar, Union
from miscutils.mappings import Namespace
__all__ = [
"EnvParser",
"Param",
"Error",
"InvalidParam",
"ParseError",
"InvalidValue",
"MissingValue",
]
class Error(Exception):
"""Base Exception type for the ``envparse`` module."""
def _fmt(self, msg: str) -> str:
return f"error: {msg}"
class InvalidParam(Error):
"""Invalid ``Param`` instantiation."""
def __init__(self, param: "Param", msg: str):
self.path = None
self.param = param
self.msg = msg
def __str__(self) -> str:
return self._fmt(f"invalid param defined at {self.path}: {self.msg}")
class ParseError(Error):
"""A parameter was not satisfied while parsing."""
def __init__(self, param: "Param"):
self.param = param
class InvalidValue(ParseError):
def __init__(self, param: "Param", value: Any, expected: Any):
super().__init__(param)
self.value = value
self.expected = expected
def __str__(self):
return self._fmt(
f"invalid value for {self.param.envvar}:"
f" expected {self.expected}, got {self.value}"
)
class MissingValue(ParseError):
def __str__(self):
return self._fmt(f"{self.param.envvar} is required")
class DEFAULT:
"""Non-None default value."""
Default = Type[DEFAULT]
T = TypeVar("ParamType", str, int, float, bool)
class Param(Generic[T]):
def __init__(
self,
type_: Type[T] = str,
*,
default: Union[T, Default] = DEFAULT,
required: Union[bool, Default] = DEFAULT,
):
self.type = type_
self.default = default
self.required = required
self.name = None
self.breadcrumbs = []
def _prepare(self):
"""Validates and prepares the Param for reading values."""
# If there is no `default`, `required` defaults to True.
if self.default is DEFAULT:
self.default = None
if self.required is DEFAULT:
self.required = True
# If there is a `default`, param MUST NOT be required.
else:
if self.required is True:
raise InvalidParam(
self, "cannot have a default and be required"
)
self.required = False
# `default` must be an instance of `type`.
if not isinstance(self.default, self.type):
raise InvalidParam(
self,
f"param was defined with type {self.type.__name__}, but"
f" `default` has type {type(self.default).__name__}",
)
def register(self, name: str, breadcrumbs: Iterable[str]) -> "Param[T]":
"""Registers the Param within the larger config structure.
This method is called by the parent ``EnvParser`` to validate the
parameter, contextualize it within the config structure, and get it
into a ready state for parsing.
Args:
name: The parameter's key in the parent ``EnvParser``.
breadcrumbs: A series of keys that locates the parameter from the
top-level ``EnvParser``.
Returns:
self: This is just a convenience to allow method chaining.
Raises:
InvalidParam: If the Param is invalid.
"""
try:
self._prepare()
except InvalidParam as exc:
exc.path = ".".join((*breadcrumbs, name))
raise
self.name = name
self.breadcrumbs = breadcrumbs
return self
@property
def envvar(self) -> str:
"""Returns the environment variable that will be read by this Param."""
return "_".join((*self.breadcrumbs, self.name)).upper()
def read(self, env: Mapping[str, str]) -> T:
"""Attempts to read ``self.envvar`` from the given environment.
Args:
env: The environment to read from. Can be any mapping type.
Returns:
The result of reading and processing the environment variable.
Raises:
MissingValue: If there is no value (and the Param is required).
InvalidValue: If the value exists but is invalid.
"""
value = env.get(self.envvar)
if not value:
if self.required:
raise MissingValue(self)
return self.default
if self.type is bool:
if value.lower() in ("1", "true"):
return True
elif value.lower() in ("0", "false"):
return False
raise InvalidValue(self, value, expected="bool")
try:
return self.type(value)
except (TypeError, ValueError):
raise InvalidValue(self, value, expected=self.type.__name__)
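# --- Hypothetical usage sketch (not part of the original module) ---
# Reads a single boolean parameter straight from a plain mapping, without going
# through EnvParser; the names below are illustrative only.
def _param_usage_sketch() -> bool:
    debug = Param(bool, default=False).register("debug", breadcrumbs=["app"])
    return debug.read({"APP_DEBUG": "true"})  # -> True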
class EnvParser:
"""A simple parser for keyed data.
EnvParser parses flat data into nested structures. Parsers can be
designed recursively in a nested map structure, where ``Param``s are the
terminal nodes and ``EnvParser`` introduce another level of nesting.
Example:
>>> parser = EnvParser(
name=Param(str),
            character_class=Param(str, default='monk'),
skills=EnvParser(
meditation=Param(bool),
fighting=Param(bool),
),
).register('player')
>>> env = dict(
PLAYER_NAME='Foo',
PLAYER_SKILLS_MEDITATION='true',
PLAYER_SKILLS_FIGHTING='false',
)
>>> opts = parser.read(env)
>>> print(opts.name)
Foo
>>> print(opts.skills.meditation)
True
>>> print(opts.skills.fighting)
False
"""
def __init__(self, **params: Union[Param, "EnvParser"]):
super().__init__()
self.initial_params = params
self.params = OrderedDict()
self.name = None
self.breadcrumbs = []
def register(
self, name: str = None, breadcrumbs: Iterable[str] = None
) -> "EnvParser":
self.breadcrumbs = [] if breadcrumbs is None else breadcrumbs
if name:
self.name = name
breadcrumbs = [*self.breadcrumbs, name]
else:
breadcrumbs = list(self.breadcrumbs)
for key, param in self.initial_params.items():
param.register(key, breadcrumbs=breadcrumbs)
self.params[key] = param
return self
def read(self, env: Mapping[str, str]) -> Namespace:
ns = Namespace()
for name, param in self.params.items():
ns[name] = param.read(env)
return ns
|
the-stack_106_30285 | """
Support for Wink lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.wink/
"""
import asyncio
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP, SUPPORT_COLOR, Light)
from homeassistant.components.wink import DOMAIN, WinkDevice
from homeassistant.util import color as color_util
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
DEPENDENCIES = ['wink']
SUPPORT_WINK = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Wink lights."""
import pywink
for light in pywink.get_light_bulbs():
_id = light.object_id() + light.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkLight(light, hass)])
for light in pywink.get_light_groups():
_id = light.object_id() + light.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkLight(light, hass)])
class WinkLight(WinkDevice, Light):
"""Representation of a Wink light."""
@asyncio.coroutine
def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['light'].append(self)
@property
def is_on(self):
"""Return true if light is on."""
return self.wink.state()
@property
def brightness(self):
"""Return the brightness of the light."""
if self.wink.brightness() is not None:
return int(self.wink.brightness() * 255)
return None
@property
def hs_color(self):
"""Define current bulb color."""
if self.wink.supports_xy_color():
return color_util.color_xy_to_hs(*self.wink.color_xy())
if self.wink.supports_hue_saturation():
hue = self.wink.color_hue()
saturation = self.wink.color_saturation()
if hue is not None and saturation is not None:
return hue*360, saturation*100
return None
@property
def color_temp(self):
"""Define current bulb color in degrees Kelvin."""
if not self.wink.supports_temperature():
return None
return color_util.color_temperature_kelvin_to_mired(
self.wink.color_temperature_kelvin())
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_WINK
def turn_on(self, **kwargs):
"""Turn the switch on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
hs_color = kwargs.get(ATTR_HS_COLOR)
color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
state_kwargs = {}
if hs_color:
if self.wink.supports_xy_color():
xy_color = color_util.color_hs_to_xy(*hs_color)
state_kwargs['color_xy'] = xy_color
if self.wink.supports_hue_saturation():
hs_scaled = hs_color[0]/360, hs_color[1]/100
state_kwargs['color_hue_saturation'] = hs_scaled
if color_temp_mired:
state_kwargs['color_kelvin'] = mired_to_kelvin(color_temp_mired)
if brightness:
state_kwargs['brightness'] = brightness / 255.0
self.wink.set_state(True, **state_kwargs)
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.wink.set_state(False)
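# A minimal sketch (not executed here) of how turn_on() rescales Home Assistant's
# HS color, which arrives as (hue in degrees, saturation in percent), into the
# 0-1 ranges pywink expects:
#
#     hs_color = (240.0, 100.0)                        # fully saturated blue
#     hs_scaled = hs_color[0] / 360, hs_color[1] / 100
#     # hs_scaled == (0.666..., 1.0), passed as 'color_hue_saturation'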
|
the-stack_106_30286 | #!/bin/python
import sys, getopt
def main(argv):
opts, args = getopt.getopt(argv, "hi")
invertColormap = False
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt == '-i':
invertColormap = True
    data = bytearray(open(args[0], 'rb').read())
    str_out = "#include <stdint.h>\n#include <avr/pgmspace.h>\n\nconst uint8_t image_data[0x12c1] PROGMEM = {"
    for i in range(0, (320*120) // 8):
        val = 0
        for j in range(0, 8):
            val |= data[(i * 8) + j] << j
if (invertColormap):
val = ~val & 0xFF;
else:
val = val & 0xFF;
str_out += hex(val) + ", "
str_out += "0x0};\n"
with open('image.c', 'w') as f:
f.write(str_out)
if (invertColormap):
print("{} converted with inverted colormap and saved to image.c".format(args[0]))
else:
print("{} converted with original colormap and saved to image.c".format(args[0]))
def usage():
print("To convert to image.c: bin2c.py yourImage.data")
print("To convert to an inverted image.c: bin2c.py -i yourImage.data")
if __name__ == "__main__":
if len(sys.argv[1:]) == 0:
usage()
        sys.exit()
else:
main(sys.argv[1:])
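# Worked example of the packing loop in main(), assuming the raw .data file stores
# one pixel per byte (0 or 1) and bit 0 of each output byte is the first pixel:
#
#     pixels = [1, 0, 1, 1, 0, 0, 0, 1]   # eight consecutive input bytes
#     val = 0
#     for j in range(8):
#         val |= pixels[j] << j
#     # val == 0b10001101 == 0x8d -> one entry of image_data[]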
|
the-stack_106_30287 | import numpy as np
import jax
from jax import jit
import collections, itertools
from functools import lru_cache as cache
from .representation import Rep, ScalarRep, Scalar
from .linear_operator_base import LinearOperator
from .linear_operators import LazyPerm, LazyDirectSum, LazyKron, LazyKronsum, I, lazy_direct_matmat, lazify, product, \
torch_lazy_direct_matmat
from functools import reduce
from collections import defaultdict
from plum import dispatch
from emlp.utils import memory
class TorchLazyP:
def __init__(self, Ps_values, multiplicities, perm, invperm):
self.Ps_values = Ps_values
self.multiplicities = multiplicities
self.perm = perm
self.invperm = invperm
def __call__(self, array):
return torch_lazy_direct_matmat(array[self.perm], self.Ps_values, self.multiplicities)[self.invperm]
class SumRep(Rep):
def __init__(self, *reps, extra_perm=None): # repcounter,repperm=None):
        """ Constructs a tensor type based on a list of tensor ranks
        and possibly the symmetry generators gen."""
        super().__init__()
# Integers can be used as shorthand for scalars.
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else rep for rep in reps]
# Get reps and permutations
reps, perms = zip(*[rep.canonicalize() for rep in reps])
rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps]
# Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b}
# self.reps = sum(rep_counters,Counter({}))
self.reps, perm = self.compute_canonical(rep_counters, perms)
self.perm = extra_perm[perm] if extra_perm is not None else perm
self.invperm = np.argsort(self.perm)
self.canonical = (self.perm == np.arange(len(self.perm))).all()
self.is_permutation = all(rep.is_permutation for rep in self.reps.keys())
def size(self):
return sum(rep.size() * count for rep, count in self.reps.items())
def rho(self, M):
rhos = [rep.rho(M) for rep in self.reps]
multiplicities = self.reps.values()
return LazyPerm(self.invperm) @ LazyDirectSum(rhos, multiplicities) @ LazyPerm(self.perm)
def drho(self, A):
drhos = [rep.drho(A) for rep in self.reps]
multiplicities = self.reps.values()
return LazyPerm(self.invperm) @ LazyDirectSum(drhos, multiplicities) @ LazyPerm(self.perm)
def __eq__(self, other):
return self.reps == other.reps and (self.perm == other.perm).all()
def __hash__(self):
assert self.canonical
return hash(tuple(self.reps.items()))
@property
def T(self):
""" only swaps to adjoint representation, does not reorder elems"""
return SumRep(*[rep.T for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm)
# not necessarily still canonical ordered
# return SumRepFromCollection({rep.T:c for rep,c in self.reps.items()},self.perm)
def __repr__(self):
return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items())
def __str__(self):
tensors = "+".join(f"{count if count > 1 else ''}{rep}" for rep, count in self.reps.items())
return tensors # +f" @ d={self.d}" if self.d is not None else tensors
def canonicalize(self):
"""Returns a canonically ordered rep with order np.arange(self.size()) and the
permutation which achieves that ordering"""
return SumRepFromCollection(self.reps), self.perm
def __call__(self, G):
return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm)
@property
def concrete(self):
return True
def equivariant_basis(self):
""" Overrides default implementation with a more efficient version which decomposes the constraints
across the sum."""
Qs = {rep: rep.equivariant_basis() for rep in self.reps}
Qs = {rep: (jax.device_put(Q.astype(np.float32)) if isinstance(Q, (np.ndarray)) else Q) for rep, Q in
Qs.items()}
active_dims = sum([self.reps[rep] * Qs[rep].shape[-1] for rep in Qs.keys()])
multiplicities = self.reps.values()
def lazy_Q(array):
return lazy_direct_matmat(array, Qs.values(), multiplicities)[self.invperm]
return LinearOperator(shape=(self.size(), active_dims), matvec=lazy_Q, matmat=lazy_Q)
def equivariant_projector(self):
""" Overrides default implementation with a more efficient version which decomposes the constraints
across the sum."""
Ps = {rep: rep.equivariant_projector() for rep in self.reps}
multiplicities = self.reps.values()
def lazy_P(array):
return lazy_direct_matmat(array[self.perm], Ps.values(), multiplicities)[self.invperm] # [:,self.invperm]
return LinearOperator(shape=(self.size(), self.size()), matvec=lazy_P, matmat=lazy_P)
def torch_equivariant_projector(self):
""" Overrides default implementation with a more efficient version which decomposes the constraints
across the sum."""
Ps = {rep: rep.equivariant_projector() for rep in self.reps}
multiplicities = list(self.reps.values())
# def lazy_P(array):
# return torch_lazy_direct_matmat(array[self.perm], Ps.values(), multiplicities)[self.invperm] # [:,self.invperm]
lazy_P = TorchLazyP(list(Ps.values()), multiplicities, self.perm, self.invperm)
return LinearOperator(shape=(self.size(), self.size()), matvec=lazy_P, matmat=lazy_P)
# ##TODO: investigate why these more idiomatic definitions with Lazy Operators end up slower
# def equivariant_basis(self):
# Qs = [rep.equivariant_basis() for rep in self.reps]
# Qs = [(jax.device_put(Q.astype(np.float32)) if isinstance(Q,(np.ndarray)) else Q) for Q in Qs]
# multiplicities = self.reps.values()
# return LazyPerm(self.invperm)@LazyDirectSum(Qs,multiplicities)
# def equivariant_projector(self):
# Ps = [rep.equivariant_projector() for rep in self.reps]
# Ps = (jax.device_put(P.astype(np.float32)) if isinstance(P,(np.ndarray)) else P)
# multiplicities = self.reps.values()
# return LazyPerm(self.invperm)@LazyDirectSum(Ps,multiplicities)@LazyPerm(self.perm)
# Some additional SumRep specific methods to be used for internal purposes
@staticmethod
def compute_canonical(rep_cnters, rep_perms):
""" given that rep1_perm and rep2_perm are the canonical orderings for
rep1 and rep2 (ie v[rep1_perm] is in canonical order) computes
the canonical order for rep1 + rep2"""
# First: merge counters
unique_reps = sorted(reduce(lambda a, b: a | b, [cnter.keys() for cnter in rep_cnters]))
merged_cnt = defaultdict(int)
permlist = []
ids = [0] * len(rep_cnters)
shifted_perms = []
n = 0
for perm in rep_perms:
shifted_perms.append(n + perm)
n += len(perm)
for rep in unique_reps:
for i in range(len(ids)):
c = rep_cnters[i].get(rep, 0)
permlist.append(shifted_perms[i][ids[i]:ids[i] + c * rep.size()])
                ids[i] += c * rep.size()
merged_cnt[rep] += c
return dict(merged_cnt), np.concatenate(permlist)
def __iter__(self): # not a great idea to use this method (ignores permutation ordering)
return (rep for rep, c in self.reps.items() for _ in range(c))
def __len__(self):
return sum(multiplicity for multiplicity in self.reps.values())
def as_dict(self, v):
out_dict = {}
i = 0
for rep, c in self.reps.items():
chunk = c * rep.size()
out_dict[rep] = v[..., self.perm[i:i + chunk]].reshape(v.shape[:-1] + (c, rep.size()))
i += chunk
return out_dict
def both_concrete(rep1, rep2):
return all(rep.concrete for rep in (rep1, rep2) if hasattr(rep, 'concrete'))
@dispatch.multi((SumRep, Rep), (Rep, SumRep), (SumRep, SumRep))
@memory.cache
def mul_reps(ra, rb):
if not both_concrete(ra, rb):
return DeferredProductRep(ra, rb)
return distribute_product([ra, rb])
@dispatch
def mul_reps(ra, rb): # base case
if type(ra) is ScalarRep: return rb
if type(rb) is ScalarRep: return ra
if not both_concrete(ra, rb):
return DeferredProductRep(ra, rb)
if hasattr(ra, "G") and hasattr(rb, "G") and ra.G == rb.G:
return ProductRep(ra, rb)
return DirectProduct(ra, rb)
# TODO: consolidate with the __init__ method of the basic SumRep
class SumRepFromCollection(SumRep): # a different constructor for SumRep
def __init__(self, counter, perm=None):
self.reps = counter
self.perm = np.arange(self.size()) if perm is None else perm
self.reps, self.perm = self.compute_canonical([counter], [self.perm])
self.invperm = np.argsort(self.perm)
self.canonical = (self.perm == np.arange(len(self.perm))).all()
self.is_permutation = all(rep.is_permutation for rep in self.reps.keys())
# if not self.canonical:
# print(self,self.perm,self.invperm)
def distribute_product(reps, extra_perm=None):
""" For expanding products of sums into sums of products, (ρ₁⊕ρ₂)⊗ρ₃ = (ρ₁⊗ρ₃)⊕(ρ₂⊗ρ₃).
takes in a sequence of reps=[ρ₁,ρ₂,ρ₃,...] which are to be multiplied together and at
least one of the reps is a SumRep, and distributes out the terms."""
reps, perms = zip(*[repsum.canonicalize() for repsum in reps])
reps = [rep if isinstance(rep, SumRep) else SumRepFromCollection({rep: 1}) for rep in reps]
# compute axis_wise perm to canonical vector ordering along each axis
axis_sizes = [len(perm) for perm in perms]
order = np.arange(product(axis_sizes)).reshape(tuple(len(perm) for perm in perms))
for i, perm in enumerate(perms):
order = np.swapaxes(np.swapaxes(order, 0, i)[perm, ...], 0, i)
order = order.reshape(-1)
# logging.info(f"axiswise: {order}")
# Compute permutation from multilinear map ordering -> vector ordering (decomposing the blocks)
repsizes_all = []
for rep in reps:
this_rep_sizes = []
for r, c in rep.reps.items():
this_rep_sizes.extend([c * r.size()])
repsizes_all.append(tuple(this_rep_sizes))
block_perm = rep_permutation(tuple(repsizes_all))
# logging.info(f"block perm {block_perm}")
# must go from itertools product ordering to multiplicity grouped ordering
ordered_reps = []
each_perm = []
i = 0
for prod in itertools.product(*[rep.reps.items() for rep in reps]):
rs, cs = zip(*prod)
# import pdb; pdb.set_trace()
prod_rep, canonicalizing_perm = (product(cs) * reduce(lambda a, b: a * b, rs)).canonicalize()
# print(f"{rs}:{cs} in distribute yield prod_rep {prod_rep}")
ordered_reps.append(prod_rep)
shape = []
for r, c in prod:
shape.extend([c, r.size()])
axis_perm = np.concatenate([2 * np.arange(len(prod)), 2 * np.arange(len(prod)) + 1])
mul_perm = np.arange(len(canonicalizing_perm)).reshape(shape).transpose(axis_perm).reshape(-1)
each_perm.append(mul_perm[canonicalizing_perm] + i)
i += len(canonicalizing_perm)
each_perm = np.concatenate(each_perm)
# logging.info(f"each perm {each_perm}")
#
total_perm = order[block_perm[each_perm]]
if extra_perm is not None: total_perm = extra_perm[total_perm]
# TODO: could achieve additional reduction by canonicalizing at this step, but unnecessary for now
return SumRep(*ordered_reps, extra_perm=total_perm)
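# Example sketch of distribute_product, assuming the usual emlp API where V is the
# base vector rep and SO(3) a group (these names are not used elsewhere in this module):
#
#     from emlp.reps import V
#     from emlp.groups import SO
#     r = (V + V**2)(SO(3))                       # SumRep: V ⊕ V⊗V
#     out = distribute_product([r, V(SO(3))])
#     # out is the SumRep (V⊗V) ⊕ (V⊗V⊗V), carrying the permutation that maps the
#     # original (ρ₁⊕ρ₂)⊗ρ₃ ordering onto the distributed ordering.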
@cache(maxsize=None)
def rep_permutation(repsizes_all):
"""Permutation from block ordering to flattened ordering"""
size_cumsums = [np.cumsum([0] + [size for size in repsizes]) for repsizes in repsizes_all]
permutation = np.zeros([cumsum[-1] for cumsum in size_cumsums]).astype(int)
arange = np.arange(permutation.size)
indices_iter = itertools.product(*[range(len(repsizes)) for repsizes in repsizes_all])
i = 0
for indices in indices_iter:
slices = tuple([slice(cumsum[idx], cumsum[idx + 1]) for idx, cumsum in zip(indices, size_cumsums)])
slice_lengths = [sl.stop - sl.start for sl in slices]
chunk_size = np.prod(slice_lengths)
permutation[slices] += arange[i:i + chunk_size].reshape(*slice_lengths)
i += chunk_size
return np.argsort(permutation.reshape(-1))
class ProductRep(Rep):
def __init__(self, *reps, extra_perm=None, counter=None):
super().__init__()
# Two variants of the constructor:
if counter is not None: # one with counter specified directly
self.reps = counter
self.reps, self.perm = self.compute_canonical([counter], [
np.arange(self.size()) if extra_perm is None else extra_perm])
else: # other with list
# Get reps and permutations
reps, perms = zip(*[rep.canonicalize() for rep in reps])
rep_counters = [rep.reps if type(rep) == ProductRep else {rep: 1} for rep in reps]
# Combine reps and permutations: ∏_a ⊗ ∏_b = ∏_{a ∪ b}
self.reps, perm = self.compute_canonical(rep_counters, perms)
self.perm = extra_perm[perm] if extra_perm is not None else perm
self.invperm = np.argsort(self.perm)
self.canonical = (self.perm == self.invperm).all()
Gs = tuple(set(rep.G for rep in self.reps.keys()))
assert len(Gs) == 1, f"Multiple different groups {Gs} in product rep {self}"
self.G = Gs[0]
self.is_permutation = all(rep.is_permutation for rep in self.reps.keys())
def size(self):
return product([rep.size() ** count for rep, count in self.reps.items()])
def rho(self, Ms, lazy=False):
if hasattr(self, 'G') and isinstance(Ms, dict): Ms = Ms[self.G]
canonical_lazy = LazyKron([rep.rho(Ms) for rep, c in self.reps.items() for _ in range(c)])
return LazyPerm(self.invperm) @ canonical_lazy @ LazyPerm(self.perm)
def drho(self, As):
if hasattr(self, 'G') and isinstance(As, dict): As = As[self.G]
canonical_lazy = LazyKronsum([rep.drho(As) for rep, c in self.reps.items() for _ in range(c)])
return LazyPerm(self.invperm) @ canonical_lazy @ LazyPerm(self.perm)
def __hash__(self):
assert self.canonical, f"Not canonical {repr(self)}? perm {self.perm}"
return hash(tuple(self.reps.items()))
def __eq__(self, other): # TODO: worry about non canonical?
return isinstance(other, ProductRep) and self.reps == other.reps and (self.perm == other.perm).all()
@property
def concrete(self):
return True
@property
def T(self):
""" only swaps to adjoint representation, does not reorder elems"""
return self.__class__(*[rep.T for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm)
# return self.__class__(counter={rep.T:c for rep,c in self.reps.items()},extra_perm=self.perm)
def __str__(self):
superscript = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")
return "⊗".join([str(rep) + (f"{c}".translate(superscript) if c > 1 else "") for rep, c in self.reps.items()])
def canonicalize(self):
"""Returns a canonically ordered rep with order np.arange(self.size()) and the
permutation which achieves that ordering"""
return self.__class__(counter=self.reps), self.perm
@staticmethod
def compute_canonical(rep_cnters, rep_perms):
""" given that rep1_perm and rep2_perm are the canonical orderings for
rep1 and rep2 (ie v[rep1_perm] is in canonical order) computes
the canonical order for rep1 * rep2"""
order = np.arange(product(len(perm) for perm in rep_perms))
# First: merge counters
unique_reps = sorted(reduce(lambda a, b: a | b, [cnter.keys() for cnter in rep_cnters]))
merged_cnt = defaultdict(int)
# Reshape like the tensor it is
order = order.reshape(tuple(len(perm) for perm in rep_perms))
# apply the canonicalizing permutations along each axis
for i, perm in enumerate(rep_perms):
order = np.moveaxis(np.moveaxis(order, i, 0)[perm, ...], 0, i)
# sort the axes by canonical ordering
# get original axis ids
axis_ids = []
n = 0
for cnter in rep_cnters:
axis_idsi = {}
for rep, c in cnter.items():
axis_idsi[rep] = n + np.arange(c)
n += c
axis_ids.append(axis_idsi)
axes_perm = []
for rep in unique_reps:
for i in range(len(rep_perms)):
c = rep_cnters[i].get(rep, 0)
if c != 0:
axes_perm.append(axis_ids[i][rep])
merged_cnt[rep] += c
axes_perm = np.concatenate(axes_perm)
# reshaped but with inner axes within a collection explicitly expanded
order = order.reshape(tuple(rep.size() for cnter in rep_cnters for rep, c in cnter.items() for _ in range(c)))
final_order = np.transpose(order, axes_perm)
return dict(merged_cnt), final_order.reshape(-1)
class DirectProduct(ProductRep):
""" Tensor product of representations ρ₁⊗ρ₂, but where the sub representations
ρ₁ and ρ₂ are representations of distinct groups (ie ρ₁⊗ρ₂ is a representation
of the direct product of groups G=G₁×G₂). As a result, the solutions for the two
sub representations can be solved independently and assembled together with the
kronecker product: Q = Q₁⊗Q₂ and P = P₁⊗P₂"""
def __init__(self, *reps, counter=None, extra_perm=None):
# Two variants of the constructor:
if counter is not None: # one with counter specified directly
self.reps = counter
self.reps, perm = self.compute_canonical([counter], [np.arange(self.size())])
self.perm = extra_perm[perm] if extra_perm is not None else perm
else: # other with list
reps, perms = zip(*[rep.canonicalize() for rep in reps])
# print([type(rep) for rep in reps],type(rep1),type(rep2))
rep_counters = [rep.reps if type(rep) == DirectProduct else {rep: 1} for rep in reps]
# Combine reps and permutations: Pi_a + Pi_b = Pi_{a x b}
reps, perm = self.compute_canonical(rep_counters, perms)
# print("dprod init",self.reps)
group_dict = defaultdict(lambda: 1)
for rep, c in reps.items():
group_dict[rep.G] = group_dict[rep.G] * rep ** c
sub_products = {rep: 1 for G, rep in group_dict.items()}
self.reps = counter = sub_products
self.reps, perm2 = self.compute_canonical([counter], [np.arange(self.size())])
self.perm = extra_perm[perm[perm2]] if extra_perm is not None else perm[perm2]
self.invperm = np.argsort(self.perm)
self.canonical = (self.perm == self.invperm).all()
# self.G = tuple(set(rep.G for rep in self.reps.keys()))
# if len(self.G)==1: self.G= self.G[0]
self.is_permutation = all(rep.is_permutation for rep in self.reps.keys())
assert all(count == 1 for count in self.reps.values())
def equivariant_basis(self):
canon_Q = LazyKron([rep.equivariant_basis() for rep, c in self.reps.items()])
return LazyPerm(self.invperm) @ canon_Q
def equivariant_projector(self):
canon_P = LazyKron([rep.equivariant_projector() for rep, c in self.reps.items()])
return LazyPerm(self.invperm) @ canon_P @ LazyPerm(self.perm)
def rho(self, Ms):
canonical_lazy = LazyKron([rep.rho(Ms) for rep, c in self.reps.items() for _ in range(c)])
return LazyPerm(self.invperm) @ canonical_lazy @ LazyPerm(self.perm)
def drho(self, As):
canonical_lazy = LazyKronsum([rep.drho(As) for rep, c in self.reps.items() for _ in range(c)])
return LazyPerm(self.invperm) @ canonical_lazy @ LazyPerm(self.perm)
def __str__(self):
superscript = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")
return "⊗".join([str(rep) + f"_{rep.G}" for rep, c in self.reps.items()])
class DeferredSumRep(Rep):
def __init__(self, *reps):
super().__init__()
self.to_sum = []
for rep in reps:
# assert not isinstance(rep,SumRep),f"{rep} of type {type(rep)} tosum {self.to_sum}"
self.to_sum.extend(rep.to_sum if isinstance(rep, DeferredSumRep) else [rep])
def __call__(self, G):
if G is None: return self
return SumRep(*[rep(G) for rep in self.to_sum])
def __repr__(self):
return '(' + "+".join(f"{rep}" for rep in self.to_sum) + ')'
def __str__(self):
return repr(self)
@property
def T(self):
return DeferredSumRep(*[rep.T for rep in self.to_sum])
@property
def concrete(self):
return False
class DeferredProductRep(Rep):
def __init__(self, *reps):
super().__init__()
self.to_prod = []
for rep in reps:
assert not isinstance(rep, ProductRep)
self.to_prod.extend(rep.to_prod if isinstance(rep, DeferredProductRep) else [rep])
def __call__(self, G):
if G is None: return self
return reduce(lambda a, b: a * b, [rep(G) for rep in self.to_prod])
def __repr__(self):
return "⊗".join(f"{rep}" for rep in self.to_prod)
def __str__(self):
return repr(self)
@property
def T(self):
return DeferredProductRep(*[rep.T for rep in self.to_prod])
@property
def concrete(self):
return False
|
the-stack_106_30288 | import sys
sys.stdin = open("input.txt", "r")
#sys.stdout = open("output.txt", "w")
def dfs(graph, start):
visited, stack = set(), [start]
while stack:
vertex = stack.pop()
if vertex not in visited:
visited.add(vertex)
stack.extend(graph[vertex] - visited)
return visited
n = int(input())
mas = [list(map(int, input().split())) for i in range(n)]
visit = []
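# Sketch of the presumed intent (an assumption, since the script stops at printing the
# matrix): treat `mas` as an adjacency matrix, convert it to the {vertex: set(neighbours)}
# shape that dfs() expects, and traverse from vertex 0:
#
#     graph = {i: {j for j, v in enumerate(row) if v} for i, row in enumerate(mas)}
#     visit = dfs(graph, 0)
#     print(visit)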
print(mas) |
the-stack_106_30289 | # Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement Transformer Class for ViT
"""
import copy
import paddle
import paddle.nn as nn
from droppath import DropPath
from config import get_config
class Identity(nn.Layer):
""" Identity layer
The output of this layer is the input without any change.
Use this layer to avoid using 'if' condition in forward methods
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class PatchEmbedding(nn.Layer):
"""Patch Embedding and Position Embedding
Apply patch embedding and position embedding on input images.
Attributes:
        patch_embedding: implemented as a patch_size x patch_size strided Conv2D operation
        position_embeddings: a learnable parameter with length num_patches + 1 (for cls_token)
cls_token: token insert to the patch feature for classification
dropout: dropout for embeddings
"""
def __init__(self,
image_size=224,
patch_size=16,
in_channels=3,
embed_dim=768,
dropout=0.):
super().__init__()
n_patches = (image_size // patch_size) * (image_size // patch_size)
self.patch_embedding = nn.Conv2D(in_channels=in_channels,
out_channels=embed_dim,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = paddle.create_parameter(
shape=[1, n_patches + 1, embed_dim],
dtype='float32',
default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02))
self.cls_token = paddle.create_parameter(
shape=[1, 1, embed_dim],
dtype='float32',
default_initializer=paddle.nn.initializer.Constant(0))
self.dropout = nn.Dropout(dropout)
def forward(self, x):
cls_tokens = self.cls_token.expand((x.shape[0], -1, -1))
x = self.patch_embedding(x)
x = x.flatten(2)
x = x.transpose([0, 2, 1])
x = paddle.concat((cls_tokens, x), axis=1)
embeddings = x + self.position_embeddings # tensor broadcast
embeddings = self.dropout(embeddings)
return embeddings
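# Shape sketch for the defaults above (image_size=224, patch_size=16, embed_dim=768):
#     input x:             [N, 3, 224, 224]
#     after Conv2D:        [N, 768, 14, 14]      (224 // 16 = 14 patches per side)
#     flatten + transpose: [N, 196, 768]
#     with cls_token:      [N, 197, 768]         (196 patches + 1 class token)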
class Attention(nn.Layer):
""" Attention module
Attention module for ViT, here q, k, v are assumed the same.
The qkv mappings are stored as one single param.
Attributes:
num_heads: number of heads
attn_head_size: feature dim of single head
all_head_size: feature dim of all heads
qkv: a nn.Linear for q, k, v mapping
scales: 1 / sqrt(single_head_feature_dim)
out: projection of multi-head attention
attn_dropout: dropout for attention
proj_dropout: final dropout before output
softmax: softmax op for attention
"""
def __init__(self,
embed_dim,
num_heads,
attn_head_size=None,
qkv_bias=True,
dropout=0.,
attention_dropout=0.):
super().__init__()
assert isinstance(embed_dim, int), (
f"Expected the type of `embed_dim` to be {int}, but received {type(embed_dim)}.")
assert isinstance(num_heads, int), (
f"Expected the type of `num_heads` to be {int}, but received {type(num_heads)}.")
assert embed_dim > 0, (
f"Expected `embed_dim` to be greater than 0, but received {embed_dim}")
assert num_heads > 0, (
f"Expected `num_heads` to be greater than 0, but received {num_heads}")
self.embed_dim = embed_dim
self.num_heads = num_heads
if attn_head_size is not None:
assert isinstance(attn_head_size, int), (
f"Expected the type of `attn_head_size` to be {int}, "
f"but received {type(attn_head_size)}.")
assert attn_head_size > 0, f"Expected `attn_head_size` to be greater than 0," \
f" but received {attn_head_size}."
self.attn_head_size = attn_head_size
else:
self.attn_head_size = embed_dim // num_heads
assert self.attn_head_size * num_heads == embed_dim, (
f"`embed_dim` must be divisible by `num_heads`,"
f" but received embed_dim={embed_dim}, num_heads={num_heads}.")
self.all_head_size = self.attn_head_size * num_heads
w_attr_1, b_attr_1 = self._init_weights()
self.qkv = nn.Linear(embed_dim,
self.all_head_size * 3, # weights for q, k, and v
weight_attr=w_attr_1,
bias_attr=b_attr_1 if qkv_bias else False)
self.scales = self.attn_head_size ** -0.5
w_attr_2, b_attr_2 = self._init_weights()
self.out = nn.Linear(self.all_head_size,
embed_dim,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
self.attn_dropout = nn.Dropout(attention_dropout)
self.proj_dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(axis=-1)
def _init_weights(self):
weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform())
bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform())
return weight_attr, bias_attr
def transpose_multihead(self, x):
new_shape = x.shape[:-1] + [self.num_heads, self.attn_head_size]
x = x.reshape(new_shape)
x = x.transpose([0, 2, 1, 3])
return x
def forward(self, x):
qkv = self.qkv(x).chunk(3, axis=-1)
q, k, v = map(self.transpose_multihead, qkv)
attn = paddle.matmul(q, k, transpose_y=True)
attn = attn * self.scales
attn = self.softmax(attn)
attn_weights = attn
attn = self.attn_dropout(attn)
z = paddle.matmul(attn, v)
z = z.transpose([0, 2, 1, 3])
new_shape = z.shape[:-2] + [self.all_head_size]
z = z.reshape(new_shape)
# reshape
z = self.out(z)
z = self.proj_dropout(z)
return z, attn_weights
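# Head-size sketch for the attention block above: with embed_dim=768, num_heads=12 and
# attn_head_size=None, each head gets 768 // 12 = 64 features, the fused qkv Linear maps
# 768 -> 3 * 768 = 2304, and attention scores are scaled by 64 ** -0.5 = 0.125.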
class Mlp(nn.Layer):
""" MLP module
    Implemented with two nn.Linear layers, a GELU activation, and dropout.
Ops: fc -> act -> dropout -> fc -> dropout
Attributes:
fc1: nn.Linear
fc2: nn.Linear
act: GELU
dropout1: dropout after fc1
dropout2: dropout after fc2
"""
def __init__(self,
embed_dim,
mlp_ratio,
dropout=0.):
super().__init__()
w_attr_1, b_attr_1 = self._init_weights()
self.fc1 = nn.Linear(embed_dim,
int(embed_dim * mlp_ratio),
weight_attr=w_attr_1,
bias_attr=b_attr_1)
w_attr_2, b_attr_2 = self._init_weights()
self.fc2 = nn.Linear(int(embed_dim * mlp_ratio),
embed_dim,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
self.act = nn.GELU()
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def _init_weights(self):
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()) # default in pp: xavier
bias_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Normal(std=1e-6)) # default in pp: zero
return weight_attr, bias_attr
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.dropout1(x)
x = self.fc2(x)
x = self.dropout2(x)
return x
class EncoderLayer(nn.Layer):
"""Encoder Layer
Encoder layer contains attention, norm, mlp and residual
Attributes:
hidden_size: transformer feature dim
attn_norm: nn.LayerNorm before attention
mlp_norm: nn.LayerNorm before mlp
        mlp: MLP module
        attn: attention module
"""
def __init__(self,
embed_dim,
num_heads,
attn_head_size=None,
qkv_bias=True,
mlp_ratio=4.,
dropout=0.,
attention_dropout=0.,
droppath=0.):
super().__init__()
w_attr_1, b_attr_1 = self._init_weights()
self.attn_norm = nn.LayerNorm(embed_dim,
weight_attr=w_attr_1,
bias_attr=b_attr_1,
epsilon=1e-6)
self.attn = Attention(embed_dim,
num_heads,
attn_head_size,
qkv_bias,
dropout,
attention_dropout)
self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
w_attr_2, b_attr_2 = self._init_weights()
self.mlp_norm = nn.LayerNorm(embed_dim,
weight_attr=w_attr_2,
bias_attr=b_attr_2,
epsilon=1e-6)
self.mlp = Mlp(embed_dim, mlp_ratio, dropout)
def _init_weights(self):
        # LayerNorm init: scale (weight) to 1.0, bias to 0.0
        weight_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(1.0))
        bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(0.0))
return weight_attr, bias_attr
def forward(self, x):
h = x
x = self.attn_norm(x)
x, attn = self.attn(x)
x = self.drop_path(x)
x = x + h
h = x
x = self.mlp_norm(x)
x = self.mlp(x)
x = self.drop_path(x)
x = x + h
return x, attn
class Encoder(nn.Layer):
"""Transformer encoder
    The encoder contains a list of EncoderLayers followed by a final LayerNorm.
Attributes:
layers: nn.LayerList contains multiple EncoderLayers
encoder_norm: nn.LayerNorm which is applied after last encoder layer
"""
def __init__(self,
embed_dim,
num_heads,
depth,
attn_head_size=None,
qkv_bias=True,
mlp_ratio=4.0,
dropout=0.,
attention_dropout=0.,
droppath=0.):
super(Encoder, self).__init__()
        # stochastic depth decay
depth_decay = [x.item() for x in paddle.linspace(0, droppath, depth)]
layer_list = []
for i in range(depth):
encoder_layer = EncoderLayer(embed_dim,
num_heads,
attn_head_size=attn_head_size,
qkv_bias=qkv_bias,
mlp_ratio=mlp_ratio,
dropout=dropout,
attention_dropout=attention_dropout,
droppath=depth_decay[i])
layer_list.append(copy.deepcopy(encoder_layer))
self.layers = nn.LayerList(layer_list)
w_attr_1, b_attr_1 = self._init_weights()
self.encoder_norm = nn.LayerNorm(embed_dim,
weight_attr=w_attr_1,
bias_attr=b_attr_1,
epsilon=1e-6)
def _init_weights(self):
        # LayerNorm init: scale (weight) to 1.0, bias to 0.0
        weight_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(1.0))
        bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(0.0))
return weight_attr, bias_attr
def forward(self, x):
self_attn = []
for layer in self.layers:
x, attn = layer(x)
self_attn.append(attn)
out = self.encoder_norm(x)
return out, self_attn
class VisualTransformer(nn.Layer):
"""ViT transformer
    ViT Transformer. For finetuning, the classifier is a single Linear layer;
    for training from scratch, a two-layer MLP head should be used.
    Classification is done using the cls_token.
Args:
image_size: int, input image size, default: 224
patch_size: int, patch size, default: 16
in_channels: int, input image channels, default: 3
num_classes: int, number of classes for classification, default: 1000
embed_dim: int, embedding dimension (patch embed out dim), default: 768
depth: int, number ot transformer blocks, default: 12
num_heads: int, number of attention heads, default: 12
mlp_ratio: float, ratio of mlp hidden dim to embed dim(mlp in dim), default: 4.0
qkv_bias: bool, If True, enable qkv(nn.Linear) layer with bias, default: True
dropout: float, dropout rate for linear layers, default: 0.
attention_dropout: float, dropout rate for attention layers default: 0.
droppath: float, droppath rate for droppath layers, default: 0.
"""
def __init__(self,
image_size=224,
patch_size=16,
in_channels=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
attn_head_size=None,
mlp_ratio=4,
qkv_bias=True,
dropout=0.,
attention_dropout=0.,
droppath=0.,
train_from_scratch=False,
config=None):
super(VisualTransformer, self).__init__()
# create patch embedding with positional embedding
self.patch_embedding = PatchEmbedding(image_size,
patch_size,
in_channels,
embed_dim,
dropout)
# create multi head self-attention layers
self.encoder = Encoder(embed_dim,
num_heads,
depth,
attn_head_size,
qkv_bias,
mlp_ratio,
dropout,
attention_dropout,
droppath)
        # classifier head (for training from scratch)
if train_from_scratch:
w_attr_1, b_attr_1 = self._init_weights()
w_attr_2, b_attr_2 = self._init_weights()
self.classifier = nn.Sequential(
nn.Linear(config.MODEL.TRANS.HIDDEN_SIZE,
config.MODEL.TRANS.HIDDEN_SIZE,
weight_attr=w_attr_1,
bias_attr=b_attr_1),
nn.ReLU(),
nn.Dropout(config.MODEL.DROPOUT),
nn.Linear(config.MODEL.TRANS.HIDDEN_SIZE,
config.MODEL.NUM_CLASSES,
weight_attr=w_attr_2,
bias_attr=b_attr_2),
nn.Dropout(config.MODEL.DROPOUT),
)
else:
# classifier head (for finetuning)
w_attr_1, b_attr_1 = self._init_weights()
self.classifier = nn.Linear(embed_dim,
num_classes,
weight_attr=w_attr_1,
bias_attr=b_attr_1)
def _init_weights(self):
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.KaimingUniform())
bias_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.KaimingUniform())
return weight_attr, bias_attr
def forward(self, x):
x = self.patch_embedding(x)
x, attn = self.encoder(x)
logits = self.classifier(x[:, 0]) # take only cls_token as classifier
return logits
def build_vit(config):
model = VisualTransformer(image_size=config.DATA.IMAGE_SIZE,
patch_size=config.MODEL.TRANS.PATCH_SIZE,
in_channels=3,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.TRANS.EMBED_DIM,
depth=config.MODEL.TRANS.DEPTH,
num_heads=config.MODEL.TRANS.NUM_HEADS,
attn_head_size=config.MODEL.TRANS.ATTN_HEAD_SIZE,
mlp_ratio=config.MODEL.TRANS.MLP_RATIO,
qkv_bias=config.MODEL.TRANS.QKV_BIAS,
dropout=config.MODEL.DROPOUT,
attention_dropout=config.MODEL.ATTENTION_DROPOUT,
droppath=config.MODEL.DROPPATH,
train_from_scratch=False,
config=config)
return model
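# Usage sketch, assuming a config object obtained from get_config() with the fields
# referenced above (DATA.IMAGE_SIZE, MODEL.NUM_CLASSES, the MODEL.TRANS block, ...):
#
#     import paddle
#     config = get_config()
#     model = build_vit(config)
#     x = paddle.randn([2, 3, config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE])
#     logits = model(x)               # shape: [2, config.MODEL.NUM_CLASSES]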
|
the-stack_106_30290 | import pytest
from django.urls import resolve, reverse
from islam_fitz.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
|
the-stack_106_30292 | import pytest
from unittest import TestCase
from pyflamegpu import *
import os
XML_FILE_NAME = "test.xml"
JSON_FILE_NAME = "test.json"
AGENT_COUNT = 100
class ValidateEnv(pyflamegpu.HostFunctionCallback):
"""
pyflamegpu requires step functions to be a class which extends the StepFunction base class.
This class must extend the run function
"""
def __init__(self):
super().__init__()
# default values for assertion checks
self.float =0
self.double = 0
self.int64_t = 0
self.uint64_t = 0
self.int32_t = 0
self.uint32_t = 0
self.int16_t = 0
self.uint16_t = 0
self.int8_t = 0
self.uint8_t = 0
self.float_a = ( )
self.double_a = ( )
self.int64_t_a = ( )
self.uint64_t_a = ( )
self.int32_t_a = ( )
self.uint32_t_a = ( )
self.int16_t_a = ( )
self.uint16_t_a = ( )
self.int8_t_a = ( )
self.uint8_t_a = ( )
self.validate_has_run = False
def run(self, FLAMEGPU):
"""
Assertions are not possible within the run function as this is a callback in the c++ library.
Instead values can be saved to the class and asserted after the model step function has completed.
"""
self.float = FLAMEGPU.environment.getPropertyFloat("float")
self.double = FLAMEGPU.environment.getPropertyDouble("double")
self.int64_t = FLAMEGPU.environment.getPropertyInt64("int64_t")
self.uint64_t = FLAMEGPU.environment.getPropertyUInt64("uint64_t")
self.int32_t = FLAMEGPU.environment.getPropertyInt32("int32_t")
self.uint32_t = FLAMEGPU.environment.getPropertyUInt32("uint32_t")
self.int16_t = FLAMEGPU.environment.getPropertyInt16("int16_t")
self.uint16_t = FLAMEGPU.environment.getPropertyUInt16("uint16_t")
self.int8_t = FLAMEGPU.environment.getPropertyInt8("int8_t")
self.uint8_t = FLAMEGPU.environment.getPropertyUInt8("uint8_t")
self.float_a = FLAMEGPU.environment.getPropertyArrayFloat("float_a")
self.double_a = FLAMEGPU.environment.getPropertyArrayDouble("double_a")
self.int64_t_a = FLAMEGPU.environment.getPropertyArrayInt64("int64_t_a")
self.uint64_t_a = FLAMEGPU.environment.getPropertyArrayUInt64("uint64_t_a")
self.int32_t_a = FLAMEGPU.environment.getPropertyArrayInt32("int32_t_a")
self.uint32_t_a = FLAMEGPU.environment.getPropertyArrayUInt32("uint32_t_a")
self.int16_t_a = FLAMEGPU.environment.getPropertyArrayInt16("int16_t_a")
self.uint16_t_a = FLAMEGPU.environment.getPropertyArrayUInt16("uint16_t_a")
self.int8_t_a = FLAMEGPU.environment.getPropertyArrayInt8("int8_t_a")
self.uint8_t_a = FLAMEGPU.environment.getPropertyArrayUInt8("uint8_t_a")
self.validate_has_run = True
def apply_assertions(self):
assert self.float == 12.0
assert self.double == 13.0
assert self.int64_t == 14
assert self.uint64_t == 15
assert self.int32_t == 16
assert self.uint32_t == 17
assert self.int16_t == 18
assert self.uint16_t == 19
assert self.int8_t == 20
assert self.uint8_t == 21
assert self.float_a == ( 12.0, 0.0, 1.0 )
assert self.double_a == ( 13.0, 0.0, 1.0 )
assert self.int64_t_a == ( 14, 0, 1 )
assert self.uint64_t_a == ( 15, 0, 1 )
assert self.int32_t_a == ( 16, 0, 1 )
assert self.uint32_t_a == ( 17, 0, 1 )
assert self.int16_t_a == ( 18, 0, 1 )
assert self.uint16_t_a == ( 19, 0, 1 )
assert self.int8_t_a == ( 20, 0, 1 )
assert self.uint8_t_a == ( 21, 0, 1 )
assert self.validate_has_run == True
class ResetEnv(pyflamegpu.HostFunctionCallback):
"""
    pyflamegpu host functions must be classes which extend the HostFunctionCallback base class.
    Such a class must override the run method.
"""
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
FLAMEGPU.environment.setPropertyFloat("float", 0)
FLAMEGPU.environment.setPropertyDouble("double", 0)
FLAMEGPU.environment.setPropertyInt64("int64_t", 0)
FLAMEGPU.environment.setPropertyUInt64("uint64_t", 0)
FLAMEGPU.environment.setPropertyInt32("int32_t", 0)
FLAMEGPU.environment.setPropertyUInt32("uint32_t", 0)
FLAMEGPU.environment.setPropertyInt16("int16_t", 0)
FLAMEGPU.environment.setPropertyUInt16("uint16_t", 0)
FLAMEGPU.environment.setPropertyInt8("int8_t", 0)
FLAMEGPU.environment.setPropertyUInt8("uint8_t", 0)
FLAMEGPU.environment.setPropertyArrayFloat("float_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayDouble("double_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayInt64("int64_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayUInt64("uint64_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayInt32("int32_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayUInt32("uint32_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayInt16("int16_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayUInt16("uint16_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayInt8("int8_t_a", (0,0,0))
FLAMEGPU.environment.setPropertyArrayUInt8("uint8_t_a", (0,0,0))
def io_test_fixture(IO_FILENAME):
m = pyflamegpu.ModelDescription("test_read_write")
a = m.newAgent("a")
a.newVariableFloat("float")
a.newVariableDouble("double")
a.newVariableInt64("int64_t")
a.newVariableUInt64("uint64_t")
a.newVariableInt32("int32_t")
a.newVariableUInt32("uint32_t")
a.newVariableInt16("int16_t")
a.newVariableUInt16("uint16_t")
a.newVariableInt8("int8_t")
a.newVariableUInt8("uint8_t")
b = m.newAgent("b")
b.newState("1")
b.newState("2")
b.newVariableArrayFloat("float", 3)
b.newVariableArrayDouble("double", 3)
b.newVariableArrayInt64("int64_t", 3)
b.newVariableArrayUInt64("uint64_t", 3)
b.newVariableArrayInt32("int32_t", 3)
b.newVariableArrayUInt32("uint32_t", 3)
b.newVariableArrayInt16("int16_t", 3)
b.newVariableArrayUInt16("uint16_t", 3)
b.newVariableArrayInt8("int8_t", 3)
b.newVariableArrayUInt8("uint8_t", 3)
e = m.Environment()
e.newPropertyFloat("float", 12.0)
e.newPropertyDouble("double", 13.0)
e.newPropertyInt64("int64_t", 14)
e.newPropertyUInt64("uint64_t", 15)
e.newPropertyInt32("int32_t", 16)
e.newPropertyUInt32("uint32_t", 17)
e.newPropertyInt16("int16_t", 18)
e.newPropertyUInt16("uint16_t", 19)
e.newPropertyInt8("int8_t", 20)
e.newPropertyUInt8("uint8_t", 21)
e.newPropertyArrayFloat("float_a", ( 12.0, 0.0, 1.0 ))
e.newPropertyArrayDouble("double_a", ( 13.0, 0.0, 1.0 ))
e.newPropertyArrayInt64("int64_t_a", ( 14, 0, 1 ))
e.newPropertyArrayUInt64("uint64_t_a", ( 15, 0, 1 ))
e.newPropertyArrayInt32("int32_t_a", ( 16, 0, 1 ))
e.newPropertyArrayUInt32("uint32_t_a", ( 17, 0, 1 ))
e.newPropertyArrayInt16("int16_t_a", ( 18, 0, 1))
e.newPropertyArrayUInt16("uint16_t_a", ( 19, 0, 1 ))
e.newPropertyArrayInt8("int8_t_a", ( 20, 0, 1 ))
e.newPropertyArrayUInt8("uint8_t_a", (21, 0, 1))
pop_a_out = pyflamegpu.AgentVector (a, AGENT_COUNT)
for i in range(AGENT_COUNT):
agent = pop_a_out[i]
agent.setVariableFloat("float", float(1.0 + i))
agent.setVariableDouble("double", float(2.0 + i))
agent.setVariableInt64("int64_t", 3 + i)
agent.setVariableUInt64("uint64_t", 4 + i)
agent.setVariableInt32("int32_t", 5 + i)
agent.setVariableUInt32("uint32_t", 6 + i)
agent.setVariableInt16("int16_t", (7 + i))
agent.setVariableUInt16("uint16_t", (8 + i))
agent.setVariableInt8("int8_t", (9 + i))
agent.setVariableUInt8("uint8_t", (10 + i))
pop_b_out = pyflamegpu.AgentVector(b, AGENT_COUNT)
for i in range(AGENT_COUNT):
agent = pop_b_out[i]
agent.setVariableArrayFloat("float", ( 1.0, float(i), 1.0 ))
agent.setVariableArrayDouble("double", ( 2.0, float(i), 1.0 ))
agent.setVariableArrayInt64("int64_t", ( 3, i, 1 ))
agent.setVariableArrayUInt64("uint64_t", ( 4, i, 1 ))
agent.setVariableArrayInt32("int32_t", ( 5, i, 1 ))
agent.setVariableArrayUInt32("uint32_t", ( 6, i, 1 ))
agent.setVariableArrayInt16("int16_t", ( 7, i, 1 ))
agent.setVariableArrayUInt16("uint16_t", ( 8, i, 1 ))
agent.setVariableArrayInt8("int8_t", ( 9, i, 1 ))
agent.setVariableArrayUInt8("uint8_t", ( 10, i, 1 ))
# Add the validate and reset step functions in specific order.
validate = ValidateEnv()
reset = ResetEnv()
m.newLayer().addHostFunctionCallback(validate)
m.newLayer().addHostFunctionCallback(reset)
# Run Export
am_export = pyflamegpu.CUDASimulation(m)
am_export.setPopulationData(pop_a_out)
am_export.setPopulationData(pop_b_out, "2") # Set Variables not in the initial state
# Set config files for export too
am_export.SimulationConfig().input_file = "invalid";
am_export.SimulationConfig().random_seed = 654321;
am_export.SimulationConfig().steps = 123;
am_export.SimulationConfig().timing = True;
am_export.SimulationConfig().verbose = False;
am_export.CUDAConfig().device_id = 0;
am_export.exportData(IO_FILENAME)
del am_export # Delete previous CUDAAgentModel as multiple models with same name cant exist
# Run Import
am = pyflamegpu.CUDASimulation(m)
    # Ensure the config does not match the exported one
am.SimulationConfig().random_seed = 0;
am.SimulationConfig().steps = 0;
am.SimulationConfig().timing = False;
am.SimulationConfig().verbose = True;
# Perform import
am.SimulationConfig().input_file = IO_FILENAME
am.applyConfig()
# Validate config matches
assert am.SimulationConfig().random_seed == 654321
assert am.SimulationConfig().steps == 123
assert am.SimulationConfig().timing == True
assert am.SimulationConfig().verbose == False
assert am.SimulationConfig().input_file == IO_FILENAME
assert am.CUDAConfig().device_id == 0;
pop_a_in = pyflamegpu.AgentVector(a)
pop_b_in = pyflamegpu.AgentVector(b)
am.getPopulationData(pop_a_in)
am.getPopulationData(pop_b_in, "2")
    # Validate agent non-array variables
assert len(pop_a_in) == len(pop_a_out)
for i in range(len(pop_a_in)):
agent_in = pop_a_in[i]
agent_out = pop_a_out[i]
assert agent_in.getVariableFloat("float") == agent_out.getVariableFloat("float")
assert agent_in.getVariableDouble("double") == agent_out.getVariableDouble("double")
assert agent_in.getVariableInt64("int64_t") == agent_out.getVariableInt64("int64_t")
assert agent_in.getVariableUInt64("uint64_t") == agent_out.getVariableUInt64("uint64_t")
assert agent_in.getVariableInt32("int32_t") == agent_out.getVariableInt32("int32_t")
assert agent_in.getVariableUInt32("uint32_t") == agent_out.getVariableUInt32("uint32_t")
assert agent_in.getVariableInt16("int16_t") == agent_out.getVariableInt16("int16_t")
assert agent_in.getVariableUInt16("uint16_t") == agent_out.getVariableUInt16("uint16_t")
assert agent_in.getVariableInt8("int8_t") == agent_out.getVariableInt8("int8_t")
assert agent_in.getVariableUInt8("uint8_t") == agent_out.getVariableUInt8("uint8_t")
    # Validate agent array variables
assert len(pop_b_in) == len(pop_b_out)
for i in range(len(pop_b_in)):
agent_in = pop_b_in[i]
agent_out = pop_b_out[i]
assert agent_in.getVariableArrayFloat("float") == agent_out.getVariableArrayFloat("float")
assert agent_in.getVariableArrayDouble("double") == agent_out.getVariableArrayDouble("double")
assert agent_in.getVariableArrayInt64("int64_t") == agent_out.getVariableArrayInt64("int64_t")
assert agent_in.getVariableArrayUInt64("uint64_t") == agent_out.getVariableArrayUInt64("uint64_t")
assert agent_in.getVariableArrayInt32("int32_t") == agent_out.getVariableArrayInt32("int32_t")
assert agent_in.getVariableArrayUInt32("uint32_t") == agent_out.getVariableArrayUInt32("uint32_t")
assert agent_in.getVariableArrayInt16("int16_t") == agent_out.getVariableArrayInt16("int16_t")
assert agent_in.getVariableArrayUInt16("uint16_t") == agent_out.getVariableArrayUInt16("uint16_t")
assert agent_in.getVariableArrayInt8("int8_t") == agent_out.getVariableArrayInt8("int8_t")
assert agent_in.getVariableArrayUInt8("uint8_t") == agent_out.getVariableArrayUInt8("uint8_t")
del am # Delete previous CUDAAgentModel as multiple models with same name cant exist
    # Create a separate instance to validate env vars
am = pyflamegpu.CUDASimulation(m)
# Step once, this checks and clears env vars
am.step()
# check step function assertions (with values loaded from runtime env)
validate.apply_assertions()
# Reload env vars from file
am.SimulationConfig().input_file = IO_FILENAME
am.applyConfig()
# Step again, check they have been loaded
am.step()
# check step function assertions (with values loaded from runtime env)
validate.apply_assertions()
# Cleanup
os.remove(IO_FILENAME)
class IOTest(TestCase):
def test_xml_read_write(self):
io_test_fixture(XML_FILE_NAME);
def test_json_read_write(self):
io_test_fixture(JSON_FILE_NAME); |
the-stack_106_30295 | # Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import attr
@attr.s(auto_attribs=True)
class SimpleStats:
scanner_name: str
total_boxes: int = 0
total_missed_boxes: int = 0
total_noses: int = 0
total_missed_noses: int = 0
def add(self, total_boxes, total_missed_boxes, total_noses, total_missed_noses):
self.total_boxes += total_boxes
self.total_missed_boxes += total_missed_boxes
self.total_noses += total_noses
self.total_missed_noses += total_missed_noses
def __str__(self, infix=False):
infix = f'[{infix}] ' if infix else ""
return (f"{infix}"
f"Undetected faces: {self.total_missed_noses}/{self.total_noses}, "
f"False face detections: {self.total_missed_boxes}/{self.total_boxes}")
|
the-stack_106_30296 | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from pprint import pprint
from numpy.random import choice
from scipy.spatial.distance import cdist
# Make sure that caffe is on the python path:
caffe_root = '../../caffe/' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
test_listfile = '/scratch/16824/data/testlist_class.txt'
result_file = 'cls_results.txt'
caffe.set_device(0)
caffe.set_mode_gpu()
net = caffe.Net('/home/sbokhari/assignment1/py_scripts/train_val_p7.prototxt',
'/home/sbokhari/assignment1/py_scripts/models/model_p7.caffemodel',
caffe.TEST)
test_list = np.loadtxt(test_listfile, str, comments=None, delimiter='\n')
data_counts = len(test_list)
batch_size = net.blobs['data'].data.shape[0]
batch_count = int(np.ceil(data_counts * 1.0 / batch_size))
accuracy = 0
fnames = []
lsize = net.blobs['fc7'].data.shape
wholeset = []
for i in range(batch_count):
out = net.forward()
data = net.blobs['fc7'].data
print(data)
for j in range(batch_size):
id = i * batch_size + j
if id >= data_counts:
break
fname = test_list[id].split(' ')[0]
fnames.append(fname)
wholeset.append(np.copy(data[j,:].reshape((1,lsize[1]))))
wholeset = np.concatenate(tuple(wholeset))
files = ['NN_1/', 'NN_2/', 'NN_3/']
imgsIds = choice(len(fnames), len(files))
imgFeats = wholeset[imgsIds, :]
dists = cdist(imgFeats, wholeset, 'cosine')
sortids = np.argsort(dists, axis=1)
sorts = np.sort(dists, axis=1)
for i in range(len(files)):
print("Image {0}: {1}".format(i+1, fnames[imgsIds[i]]))
for j in range(11): #sortids[i,0:10]:
print("NN {0} at {2}: {1}".format(j, fnames[sortids[i,j]], dists[i,int(sortids[i,j])]))
img = Image.open('/scratch/16824/data/crop_imgs/'+fnames[sortids[i,j]])
img.save(files[i]+'nn'+str(j)+'.jpg')
|
the-stack_106_30297 | # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import pytest
import torch
import torch.distributed as dist
@pytest.fixture(autouse=True)
def manual_seed_zero():
torch.manual_seed(0)
@pytest.fixture(scope="session")
def cuda_sleep():
# Warm-up CUDA.
torch.empty(1, device="cuda")
# From test/test_cuda.py in PyTorch.
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
def cuda_sleep(seconds):
torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
return cuda_sleep
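# Usage sketch: a test that takes the fixture can call cuda_sleep(0.5) to enqueue a CUDA
# kernel that busy-waits roughly half a second, which helps make copy/compute overlap
# deterministic when timing pipeline stages.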
def pytest_report_header():
return f"torch: {torch.__version__}"
@pytest.fixture
def setup_rpc(scope="session"):
file = tempfile.NamedTemporaryFile()
dist.rpc.init_rpc(
name="worker0",
rank=0,
world_size=1,
rpc_backend_options=dist.rpc.TensorPipeRpcBackendOptions(
init_method="file://{}".format(file.name),
)
)
yield
dist.rpc.shutdown()
def pytest_ignore_collect(path, config):
"Skip this directory if distributed modules are not enabled."
return not dist.is_available()
|
the-stack_106_30299 | #=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Markov chain Monte Carlo simulation framework.
DESCRIPTION
This module provides a framework for equilibrium sampling from a given thermodynamic state of
a biomolecule using a Markov chain Monte Carlo scheme.
CAPABILITIES
* Langevin dynamics [assumed to be free of integration error; use at your own risk]
* hybrid Monte Carlo
* generalized hybrid Monte Carlo
NOTES
This is still in development.
REFERENCES
[1] Jun S. Liu. Monte Carlo Strategies in Scientific Computing. Springer, 2008.
EXAMPLES
Construct a simple MCMC simulation using Langevin dynamics moves.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a thermodynamic state.
>>> import simtk.unit as u
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a move set.
>>> move_set = [ HMCMove(nsteps=10), LangevinDynamicsMove(nsteps=10) ]
>>> # Create MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set)
>>> # Run a number of iterations of the sampler.
>>> updated_sampler_state = sampler.run(sampler_state, 10)
TODO
* Split this into a separate package, with individual files for each move type.
COPYRIGHT AND LICENSE
@author John D. Chodera <[email protected]>
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
TODO
----
* Recognize when MonteCarloBarostat is in use with system.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import copy
import time
import numpy as np
import simtk
import simtk.openmm as mm
import simtk.unit as u
from openmmtools import integrators
from openmmmcmc import thermodynamics
from openmmmcmc.timing import Timer
from abc import abstractmethod
import logging
logger = logging.getLogger(__name__)
#=============================================================================================
# MODULE CONSTANTS
#=============================================================================================
_RANDOM_SEED_MAX = np.iinfo(np.int32).max # maximum random number seed value
#=============================================================================================
# MCMC sampler state
#=============================================================================================
class SamplerState(object):
"""
Sampler state for MCMC move representing everything that may be allowed to change during
the simulation.
Parameters
----------
system : simtk.openmm.System
Current system specifying force calculations.
positions : array of simtk.unit.Quantity compatible with nanometers
Particle positions.
velocities : optional, array of simtk.unit.Quantity compatible with nanometers/picoseconds
Particle velocities.
box_vectors : optional, 3x3 array of simtk.unit.Quantity compatible with nanometers
Current box vectors.
Fields
------
system : simtk.openmm.System
Current system specifying force calculations.
positions : array of simtk.unit.Quantity compatible with nanometers
Particle positions.
velocities : optional, array of simtk.unit.Quantity compatible with nanometers/picoseconds
Particle velocities.
box_vectors : optional, 3x3 array of simtk.unit.Quantity compatible with nanometers
Current box vectors.
potential_energy : optional, simtk.unit.Quantity compatible with kilocalories_per_mole
Current potential energy.
kinetic_energy : optional, simtk.unit.Quantity compatible with kilocalories_per_mole
Current kinetic energy.
total_energy : optional, simtk.unit.Quantity compatible with kilocalories_per_mole
Current total energy.
platform : optional, simtk.openmm.Platform
Platform to use for Context creation to initialize sampler state.
Examples
--------
Create a sampler state for a system with box vectors.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.LennardJonesFluid()
>>> # Create a sampler state manually.
>>> box_vectors = test.system.getDefaultPeriodicBoxVectors()
>>> sampler_state = SamplerState(system=test.system, positions=test.positions, box_vectors=box_vectors)
Create a sampler state for a system without box vectors.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.LennardJonesCluster()
>>> # Create a sampler state manually.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
TODO:
* Can we remove the need to create a Context in initializing the sampler state by using the Reference platform and skipping energy calculations?
"""
def __init__(self, system, positions, velocities=None, box_vectors=None, platform=None):
self.system = copy.deepcopy(system)
self.positions = positions
self.velocities = velocities
self.box_vectors = box_vectors
# Create Context.
context = self.createContext(platform=platform)
# Get state.
openmm_state = context.getState(getPositions=True, getVelocities=True, getEnergy=True)
# Populate context.
self.positions = openmm_state.getPositions(asNumpy=True)
self.velocities = openmm_state.getVelocities(asNumpy=True)
self.box_vectors = openmm_state.getPeriodicBoxVectors(asNumpy=False)
self.potential_energy = openmm_state.getPotentialEnergy()
self.kinetic_energy = openmm_state.getKineticEnergy()
self.total_energy = self.potential_energy + self.kinetic_energy
self.volume = thermodynamics.volume(self.box_vectors)
# Clean up.
del context
@classmethod
def createFromContext(cls, context):
"""
Create a SamplerState object from the information in a current OpenMM Context object.
Parameters
----------
context : simtk.openmm.Context
The Context object from which to create a sampler state.
Returns
-------
sampler_state : SamplerState
The sampler state containing positions, velocities, and box vectors.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a Context.
>>> import simtk.openmm as mm
>>> import simtk.unit as u
>>> integrator = mm.VerletIntegrator(1.0 * u.femtoseconds)
>>> platform = mm.Platform.getPlatformByName('Reference')
>>> context = mm.Context(test.system, integrator, platform)
>>> # Set positions and velocities.
>>> context.setPositions(test.positions)
>>> context.setVelocitiesToTemperature(298 * u.kelvin)
>>> # Create a sampler state from the Context.
>>> sampler_state = SamplerState.createFromContext(context)
>>> # Clean up.
>>> del context, integrator
"""
# Get state.
openmm_state = context.getState(getPositions=True, getVelocities=True, getEnergy=True)
# Create new object, bypassing init.
self = SamplerState.__new__(cls)
# Populate the sampler state fields from the Context state.
self.system = copy.deepcopy(context.getSystem())
self.positions = openmm_state.getPositions(asNumpy=True)
self.velocities = openmm_state.getVelocities(asNumpy=True)
self.box_vectors = openmm_state.getPeriodicBoxVectors(asNumpy=True)
self.potential_energy = openmm_state.getPotentialEnergy()
self.kinetic_energy = openmm_state.getKineticEnergy()
self.total_energy = self.potential_energy + self.kinetic_energy
self.volume = thermodynamics.volume(self.box_vectors)
return self
def createContext(self, integrator=None, platform=None):
"""
Create an OpenMM Context object from the current sampler state.
Parameters
----------
integrator : simtk.openmm.Integrator, optional, default=None
The integrator to use for Context creation.
If not specified, a VerletIntegrator with 1 fs timestep is created.
platform : simtk.openmm.Platform, optional, default=None
If specified, the Platform to use for context creation.
Returns
-------
context : simtk.openmm.Context
The created OpenMM Context object
Notes
-----
If the selected or default platform fails, the CPU and Reference platforms will be tried, in that order.
Examples
--------
Create a context for a system with periodic box vectors.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.LennardJonesFluid()
>>> # Create a sampler state manually.
>>> box_vectors = test.system.getDefaultPeriodicBoxVectors()
>>> sampler_state = SamplerState(positions=test.positions, box_vectors=box_vectors, system=test.system)
>>> # Create a Context.
>>> import simtk.openmm as mm
>>> import simtk.unit as u
>>> integrator = mm.VerletIntegrator(1.0*u.femtoseconds)
>>> context = sampler_state.createContext(integrator)
>>> # Clean up.
>>> del context
Create a context for a system without periodic box vectors.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.LennardJonesCluster()
>>> # Create a sampler state manually.
>>> sampler_state = SamplerState(positions=test.positions, system=test.system)
>>> # Create a Context.
>>> import simtk.openmm as mm
>>> import simtk.unit as u
>>> integrator = mm.VerletIntegrator(1.0*u.femtoseconds)
>>> context = sampler_state.createContext(integrator)
>>> # Clean up.
>>> del context
TODO
----
* Generalize fallback platform order to [CUDA, OpenCL, CPU, Reference] ordering.
"""
if not self.system:
raise Exception("SamplerState must have a 'system' object specified to create a Context")
# Use a Verlet integrator if none is specified.
if integrator is None:
integrator = mm.VerletIntegrator(1.0 * u.femtoseconds)
# Create a Context.
if platform:
context = mm.Context(self.system, integrator, platform)
else:
context = mm.Context(self.system, integrator)
# Set box vectors, if specified.
if (self.box_vectors is not None):
try:
# try tuple of box vectors
context.setPeriodicBoxVectors(self.box_vectors[0], self.box_vectors[1], self.box_vectors[2])
except Exception:
# try numpy 3x3 matrix of box vectors
context.setPeriodicBoxVectors(self.box_vectors[0,:], self.box_vectors[1,:], self.box_vectors[2,:])
# Set positions.
context.setPositions(self.positions)
# Set velocities, if specified.
if (self.velocities is not None):
context.setVelocities(self.velocities)
return context
def minimize(self, tolerance=None, maxIterations=None, platform=None):
"""
Minimize the current configuration.
Parameters
----------
tolerance : simtk.unit.Quantity compatible with kilocalories_per_mole/angstroms, optional, default = 1*kilocalories_per_mole/angstrom
Tolerance to use for minimization termination criterion.
maxIterations : int, optional, default = 100
Maximum number of iterations to use for minimization.
platform : simtk.openmm.Platform, optional
Platform to use for minimization.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Minimize
>>> sampler_state.minimize()
"""
timer = Timer()
if (tolerance is None):
tolerance = 1.0 * u.kilocalories_per_mole / u.angstroms
if (maxIterations is None):
maxIterations = 100
# Use LocalEnergyMinimizer
from simtk.openmm import LocalEnergyMinimizer
timer.start("Context creation")
context = self.createContext(platform=platform)
logger.debug("LocalEnergyMinimizer: platform is %s" % context.getPlatform().getName())
logger.debug("Minimizing with tolerance %s and %d max. iterations." % (tolerance, maxIterations))
timer.stop("Context creation")
timer.start("LocalEnergyMinimizer minimize")
LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
timer.stop("LocalEnergyMinimizer minimize")
# Retrieve data.
sampler_state = SamplerState.createFromContext(context)
self.positions = sampler_state.positions
self.potential_energy = sampler_state.potential_energy
self.total_energy = sampler_state.total_energy
del context
timer.report_timing()
return
def has_nan(self):
"""Return True if any of the generalized coordinates are nan.
Notes
-----
Currently checks only the positions.
"""
x = self.positions / u.nanometers
if np.any(np.isnan(x)):
return True
else:
return False
#=============================================================================================
# Monte Carlo Move abstract base class
#=============================================================================================
class MCMCMove(object):
"""
Markov chain Monte Carlo (MCMC) move abstract base class.
Markov chain Monte Carlo (MCMC) simulations are constructed from a set of derived objects.
"""
@abstractmethod
def apply(self, thermodynamic_state, sampler_state, platform=None):
"""
Apply the MCMC move.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to use when applying the MCMC move
sampler_state : SamplerState
The sampler state to apply the move to
platform : simtk.openmm.Platform, optional, default = None
The platform to use.
Returns
-------
updated_sampler_state : SamplerState
The updated sampler state
"""
pass
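#=============================================================================================
# Illustrative sketch (not part of the original module): a minimal MCMCMove subclass.
# The class name is hypothetical; it only uses the SamplerState/MCMCMove API defined above
# and performs no actual update, showing just the expected shape of a custom move.
#=============================================================================================
class NullMove(MCMCMove):
    """A no-op move that returns a freshly populated SamplerState without changing positions."""
    def apply(self, thermodynamic_state, sampler_state, platform=None):
        # Create a temporary Context only to repopulate energies and velocities.
        context = sampler_state.createContext(platform=platform)
        updated_sampler_state = SamplerState.createFromContext(context)
        del context
        return updated_sampler_state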
#=============================================================================================
# Markov chain Monte Carlo sampler
#=============================================================================================
class MCMCSampler(object):
"""
Markov chain Monte Carlo sampler.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a thermodynamic state.
>>> import simtk.unit as u
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a move set specifying probabilities for each type of move.
>>> move_set = { HMCMove(nsteps=10) : 0.5, LangevinDynamicsMove(nsteps=10) : 0.5 }
>>> # Create MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set)
>>> # Run a number of iterations of the sampler.
>>> updated_sampler_state = sampler.run(sampler_state, 10)
"""
def __init__(self, thermodynamic_state, move_set=None, platform=None):
"""
Initialize a Markov chain Monte Carlo sampler.
Parameters
----------
thermodynamic_state : ThermodynamicState
Thermodynamic state to sample during MCMC run.
move_set : container of MarkovChainMonteCarloMove objects
Moves to attempt during MCMC run.
If list or tuple, will run all moves each iteration in specified sequence. (e.g. [move1, move2, move3])
If dict, will use the specified unnormalized weights (e.g. { move1 : 0.3, move2 : 0.5, move3 : 0.9 })
platform : simtk.openmm.Platform, optional, default = None
If specified, the Platform to use for simulations.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a thermodynamic state.
>>> import simtk.unit as u
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
Create a move set specifying probabilities for each type of move.
>>> move_set = { HMCMove() : 0.5, LangevinDynamicsMove() : 0.5 }
>>> # Create MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set)
Create a move set specifying an order of moves.
>>> move_set = [ HMCMove(), LangevinDynamicsMove(), HMCMove() ]
>>> # Create MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set)
"""
# Store thermodynamic state.
self.thermodynamic_state = thermodynamic_state
# Store the move set.
if type(move_set) not in [list, tuple, dict]:
raise Exception("move_set must be a list, tuple, or dict")
# TODO: Make deep copy of the move set?
self.move_set = move_set
self.platform = platform
return
def run(self, sampler_state, niterations=1):
"""
Run the sampler for a specified number of iterations.
Parameters
----------
sampler_state : SamplerState
The current state of the sampler.
niterations : int
Number of iterations of the sampler to run.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a thermodynamic state.
>>> import simtk.unit as u
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a move set specifying probabilities for each type of move.
>>> move_set = { HMCMove(nsteps=10) : 0.5, LangevinDynamicsMove(nsteps=10) : 0.5 }
>>> # Create MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set)
>>> # Run a number of iterations of the sampler.
>>> updated_sampler_state = sampler.run(sampler_state, 10)
"""
# Make a deep copy of the sampler state so that initial state is unchanged.
# TODO: This seems to cause problems. Let's figure this out later.
sampler_state = copy.deepcopy(sampler_state)
# Generate move sequence.
move_sequence = list()
if isinstance(self.move_set, (list, tuple)):
# Sequential moves.
for iteration in range(niterations):
for move in self.move_set:
move_sequence.append(move)
elif type(self.move_set) == dict:
# Random moves.
moves = list(self.move_set)
weights = np.array([self.move_set[move] for move in moves])
weights /= weights.sum() # normalize
move_sequence = np.random.choice(moves, size=niterations, p=weights)
sampler_state.system = self.thermodynamic_state.system # HACK!
# Apply move sequence.
for move in move_sequence:
sampler_state = move.apply(self.thermodynamic_state, sampler_state, platform=self.platform)
# Return the updated sampler state.
return sampler_state
def update_thermodynamic_state(self, thermodynamic_state):
"""
Update the thermodynamic state.
Parameters
----------
thermodynamic_state : ThermodynamicState
Thermodynamic state to sample during MCMC run.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a thermodynamic state.
>>> import simtk.unit as u
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a move set specifying probabilities for each type of move.
>>> move_set = { HMCMove(nsteps=10) : 0.5, LangevinDynamicsMove(nsteps=10) : 0.5 }
>>> # Create MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set)
>>> # Run a number of iterations of the sampler.
>>> updated_sampler_state = sampler.run(sampler_state, 10)
Update the thermodynamic state.
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=310*u.kelvin)
>>> sampler.update_thermodynamic_state(thermodynamic_state)
"""
# Store thermodynamic state.
self.thermodynamic_state = thermodynamic_state
#=============================================================================================
# Langevin dynamics move
#=============================================================================================
class LangevinDynamicsMove(MCMCMove):
"""
Langevin dynamics segment as a (pseudo) Monte Carlo move.
This move assigns a velocity from the Maxwell-Boltzmann distribution and executes a number
of Langevin dynamics steps to propagate the system. This is not a *true* Monte Carlo move,
in that the generation of the correct distribution is only exact in the limit of an infinitely
small timestep; in other words, the discretization error is assumed to be negligible. Use
HMCMove instead to ensure the exact distribution is generated.
Warning
-------
No Metropolization is used to ensure the correct phase space distribution is sampled.
This means that timestep-dependent errors will remain uncorrected, and are amplified with larger timesteps.
Use this move at your own risk!
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a LangevinDynamicsMove
>>> move = LangevinDynamicsMove(nsteps=10)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
def __init__(self, timestep=1.0*simtk.unit.femtosecond, collision_rate=10.0/simtk.unit.picoseconds, nsteps=1000, reassign_velocities=False):
"""
Parameters
----------
timestep : simtk.unit.Quantity compatible with femtoseconds, optional, default = 1*simtk.unit.femtoseconds
The timestep to use for Langevin integration.
collision_rate : simtk.unit.Quantity compatible with 1/picoseconds, optional, default = 10/simtk.unit.picoseconds
The collision rate with fictitious bath particles.
nsteps : int, optional, default = 1000
The number of integration timesteps to take each time the move is applied.
reassign_velocities : bool, optional, default = False
If True, the velocities will be reassigned from the Maxwell-Boltzmann distribution at the beginning of the move.
Note
----
The temperature of the thermodynamic state is used in Langevin dynamics.
If a barostat is present, its temperature and pressure will be set to match the thermodynamic state being simulated.
Examples
--------
Create a Langevin move with default parameters.
>>> move = LangevinDynamicsMove()
Create a Langevin move with specified parameters.
>>> move = LangevinDynamicsMove(timestep=0.5*u.femtoseconds, collision_rate=20.0/u.picoseconds, nsteps=100)
"""
self.timestep = timestep
self.collision_rate = collision_rate
self.nsteps = nsteps
self.reassign_velocities = reassign_velocities
return
def apply(self, thermodynamic_state, sampler_state, platform=None):
"""
Apply the Langevin dynamics MCMC move.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to use when applying the MCMC move
sampler_state : SamplerState
The sampler state to apply the move to
platform : simtk.openmm.Platform, optional, default = None
If not None, the specified platform will be used.
Returns
-------
updated_sampler_state : SamplerState
The updated sampler state
Examples
--------
Alanine dipeptide in vacuum.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a LangevinDynamicsMove
>>> move = LangevinDynamicsMove(nsteps=10, timestep=0.5*u.femtoseconds, collision_rate=20.0/u.picoseconds)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
Ideal gas.
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.IdealGas()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a LangevinDynamicsMove
>>> move = LangevinDynamicsMove(nsteps=500, timestep=0.5*u.femtoseconds, collision_rate=20.0/u.picoseconds)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
timer = Timer()
# Check if the system contains a barostat.
system = sampler_state.system
forces = { system.getForce(index).__class__.__name__ : system.getForce(index) for index in range(system.getNumForces()) }
barostat = None
if 'MonteCarloBarostat' in forces:
barostat = forces['MonteCarloBarostat']
barostat.setDefaultTemperature(thermodynamic_state.temperature)
parameter_name = barostat.Pressure()
if thermodynamic_state.pressure is None:
raise Exception('MonteCarloBarostat is present but no pressure specified in thermodynamic state.')
# Create integrator.
integrator = mm.LangevinIntegrator(thermodynamic_state.temperature, self.collision_rate, self.timestep)
# Random number seed.
seed = np.random.randint(_RANDOM_SEED_MAX)
integrator.setRandomNumberSeed(seed)
# Create context.
timer.start("Context Creation")
context = sampler_state.createContext(integrator, platform=platform)
timer.stop("Context Creation")
logger.debug("LangevinDynamicMove: Context created, platform is %s" % context.getPlatform().getName())
# Set pressure, if barostat is included.
if barostat is not None:
context.setParameter(parameter_name, thermodynamic_state.pressure.value_in_unit_system(u.md_unit_system))
if self.reassign_velocities:
# Assign Maxwell-Boltzmann velocities.
context.setVelocitiesToTemperature(thermodynamic_state.temperature)
# Run dynamics.
timer.start("step()")
integrator.step(self.nsteps)
timer.stop("step()")
# Get updated sampler state.
timer.start("update_sampler_state")
updated_sampler_state = SamplerState.createFromContext(context)
timer.start("update_sampler_state")
# Clean up.
del context
timer.report_timing()
return updated_sampler_state
#=============================================================================================
# Generalized Hybrid Monte Carlo (GHMC, a form of Metropolized Langevin dynamics) move
#=============================================================================================
class GHMCMove(MCMCMove):
"""
Generalized hybrid Monte Carlo (GHMC) Markov chain Monte Carlo move
This move uses generalized Hybrid Monte Carlo (GHMC), a form of Metropolized Langevin
dynamics, to propagate the system.
References
----------
Lelievre T, Stoltz G, and Rousset M. Free Energy Computations: A Mathematical Perspective
http://www.amazon.com/Free-Energy-Computations-Mathematical-Perspective/dp/1848162472
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Minimize.
>>> sampler_state.minimize()
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a GHMC move
>>> move = GHMCMove(nsteps=10)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
def __init__(self, timestep=1.0*simtk.unit.femtosecond, collision_rate=20.0/simtk.unit.picoseconds, nsteps=1000):
"""
Parameters
----------
timestep : simtk.unit.Quantity compatible with femtoseconds, optional, default = 1*simtk.unit.femtoseconds
The timestep to use for Langevin integration.
collision_rate : simtk.unit.Quantity compatible with 1/picoseconds, optional, default = 20/simtk.unit.picoseconds
The collision rate with fictitious bath particles.
nsteps : int, optional, default = 1000
The number of integration timesteps to take each time the move is applied.
Note
----
The temperature of the thermodynamic state is used.
Examples
--------
Create a GHMC move with default parameters.
>>> move = GHMCMove()
Create a GHMC move with specified parameters.
>>> move = GHMCMove(timestep=0.5*u.femtoseconds, collision_rate=20.0/u.picoseconds, nsteps=100)
"""
self.timestep = timestep
self.collision_rate = collision_rate
self.nsteps = nsteps
self.reset_statistics()
return
def reset_statistics(self):
"""
Reset the internal statistics of number of accepted and attempted moves.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a GHMC move
>>> move = GHMCMove(nsteps=10, timestep=1.0*u.femtoseconds, collision_rate=20.0/u.picoseconds)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
Reset statistics.
>>> move.reset_statistics()
"""
self.naccepted = 0 # number of accepted steps
self.nattempted = 0 # number of attempted steps
return
def get_statistics(self):
"""
Return the current acceptance/rejection statistics of the sampler.
Returns
-------
naccepted : int
The number of accepted steps
nattempted : int
The number of attempted steps
fraction_accepted : float
The fraction of steps accepted.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a GHMC move
>>> move = GHMCMove(nsteps=10, timestep=1.0*u.femtoseconds, collision_rate=20.0/u.picoseconds)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
Get statistics.
>>> [naccepted, nattempted, fraction_accepted] = move.get_statistics()
"""
return (self.naccepted, self.nattempted, float(self.naccepted) / float(self.nattempted))
def apply(self, thermodynamic_state, sampler_state, platform=None):
"""
Apply the GHMC MCMC move.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to use when applying the MCMC move
sampler_state : SamplerState
The sampler state to apply the move to
platform : simtk.openmm.Platform, optional, default = None
If not None, the specified platform will be used.
Returns
-------
updated_sampler_state : SamplerState
The updated sampler state
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create a GHMC move
>>> move = GHMCMove(nsteps=10, timestep=1.0*u.femtoseconds, collision_rate=20.0/u.picoseconds)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
timer = Timer()
# Create integrator.
integrator = integrators.GHMCIntegrator(temperature=thermodynamic_state.temperature, collision_rate=self.collision_rate, timestep=self.timestep)
# Random number seed.
seed = np.random.randint(_RANDOM_SEED_MAX)
integrator.setRandomNumberSeed(seed)
# Create context.
timer.start("Context Creation")
context = sampler_state.createContext(integrator, platform=platform)
timer.stop("Context Creation")
# TODO: Enforce constraints?
#tol = 1.0e-8
#context.applyConstraints(tol)
#context.applyVelocityConstraints(tol)
# Run dynamics.
timer.start("step()")
integrator.step(self.nsteps)
timer.stop("step()")
# Get updated sampler state.
timer.start("update_sampler_state")
updated_sampler_state = SamplerState.createFromContext(context)
timer.start("update_sampler_state")
# Accumulate acceptance statistics.
ghmc_global_variables = { integrator.getGlobalVariableName(index) : index for index in range(integrator.getNumGlobalVariables()) }
naccepted = integrator.getGlobalVariable(ghmc_global_variables['naccept'])
nattempted = integrator.getGlobalVariable(ghmc_global_variables['ntrials'])
self.naccepted += naccepted
self.nattempted += nattempted
# DEBUG.
#print " GHMC accepted %d / %d (%.1f%%)" % (naccepted, nattempted, float(naccepted) / float(nattempted) * 100.0)
# Clean up.
del context
timer.report_timing()
return updated_sampler_state
#=============================================================================================
# Hybrid Monte Carlo move
#=============================================================================================
class HMCMove(MCMCMove):
"""
Hybrid Monte Carlo dynamics.
This move assigns a velocity from the Maxwell-Boltzmann distribution and executes a number
of velocity Verlet steps to propagate dynamics.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create an HMC move.
>>> move = HMCMove(nsteps=10)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
def __init__(self, timestep=1.0*simtk.unit.femtosecond, nsteps=1000):
"""
Parameters
----------
timestep : simtk.unit.Quantity compatible with femtoseconds, optional, default = 1*femtosecond
The timestep to use for HMC dynamics (which uses velocity Verlet following velocity randomization)
nsteps : int, optional, default = 1000
The number of dynamics steps to take before Metropolis acceptance/rejection.
Examples
--------
Create an HMC move with default timestep and number of steps.
>>> move = HMCMove()
Create an HMC move with specified timestep and number of steps.
>>> move = HMCMove(timestep=0.5*u.femtoseconds, nsteps=500)
"""
self.timestep = timestep
self.nsteps = nsteps
return
def apply(self, thermodynamic_state, sampler_state, platform=None):
"""
Apply the MCMC move.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to use when applying the MCMC move
sampler_state : SamplerState
The sampler state to apply the move to
platform : simtk.openmm.Platform, optional, default = None
If not None, the specified platform will be used.
Returns
-------
updated_sampler_state : SamplerState
The updated sampler state
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin)
>>> # Create an HMC move.
>>> move = HMCMove(nsteps=10, timestep=0.5*u.femtoseconds)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
timer = Timer()
# Create integrator.
integrator = integrators.HMCIntegrator(temperature=thermodynamic_state.temperature, timestep=self.timestep, nsteps=self.nsteps)
# Random number seed.
seed = np.random.randint(_RANDOM_SEED_MAX)
integrator.setRandomNumberSeed(seed)
# Create context.
timer.start("Context Creation")
context = sampler_state.createContext(integrator, platform=platform)
timer.stop("Context Creation")
# Run dynamics.
# Note that ONE step of this integrator is equal to self.nsteps of velocity Verlet dynamics followed by Metropolis accept/reject.
timer.start("HMC integration")
integrator.step(1)
timer.stop("HMC integration")
# Get sampler state.
timer.start("updated_sampler_state")
updated_sampler_state = SamplerState.createFromContext(context)
timer.stop("updated_sampler_state")
# Clean up.
del context
timer.report_timing()
# Return updated sampler state.
return updated_sampler_state
#=============================================================================================
# Monte Carlo barostat move
#=============================================================================================
class MonteCarloBarostatMove(MCMCMove):
"""
Monte Carlo barostat move.
This move makes one or more attempts to update the box volume using Monte Carlo updates.
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.LennardJonesFluid(nparticles=200)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions, box_vectors=test.system.getDefaultPeriodicBoxVectors())
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin, pressure=1*u.atmospheres)
>>> # Create a move set that includes a Monte Carlo barostat move.
>>> move_set = [ GHMCMove(nsteps=50), MonteCarloBarostatMove(nattempts=5) ]
>>> # Simulate on Reference platform.
>>> import simtk.openmm as mm
>>> platform = mm.Platform.getPlatformByName('Reference')
>>> sampler = MCMCSampler(thermodynamic_state, move_set=move_set, platform=platform)
>>> # Run a number of iterations of the sampler.
>>> updated_sampler_state = sampler.run(sampler_state, 2)
"""
def __init__(self, nattempts=5):
"""
Parameters
----------
nattempts : int
The number of Monte Carlo attempts to make to adjust the box volume.
Examples
--------
Create a Monte Carlo barostat move with default parameters.
>>> move = MonteCarloBarostatMove()
Create a Monte Carlo barostat move with specified parameters.
>>> move = MonteCarloBarostatMove(nattempts=10)
"""
self.nattempts = nattempts
return
def apply(self, thermodynamic_state, sampler_state, platform=None):
"""
Apply the MCMC move.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to use when applying the MCMC move
sampler_state : SamplerState
The sampler state to apply the move to
platform : simtk.openmm.Platform, optional, default = None
If not None, the specified platform will be used.
Returns
-------
updated_sampler_state : SamplerState
The updated sampler state
Examples
--------
>>> # Create a test system
>>> from openmmtools import testsystems
>>> test = testsystems.LennardJonesFluid()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions, box_vectors=test.system.getDefaultPeriodicBoxVectors())
>>> # Create a thermodynamic state.
>>> from openmmmcmc.thermodynamics import ThermodynamicState
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298*u.kelvin, pressure=1*u.atmospheres)
>>> # Create a Monte Carlo Barostat move.
>>> move = MonteCarloBarostatMove(nattempts=5)
>>> # Perform one update of the sampler state.
>>> updated_sampler_state = move.apply(thermodynamic_state, sampler_state)
"""
timer = Timer()
# Make sure system contains a barostat.
system = sampler_state.system
forces = { system.getForce(index).__class__.__name__ : system.getForce(index) for index in range(system.getNumForces()) }
old_barostat_frequency = None
if 'MonteCarloBarostat' in forces:
force = forces['MonteCarloBarostat']
force.setDefaultTemperature(thermodynamic_state.temperature)
old_barostat_frequency = force.getFrequency()
force.setFrequency(1)
parameter_name = force.Pressure()
else:
# Add MonteCarloBarostat.
force = mm.MonteCarloBarostat(thermodynamic_state.pressure, thermodynamic_state.temperature, 1)
system.addForce(force)
parameter_name = force.Pressure()
# Create integrator.
integrator = integrators.DummyIntegrator()
# Random number seed.
seed = np.random.randint(_RANDOM_SEED_MAX)
force.setRandomNumberSeed(seed)
# Create context.
timer.start("Context Creation")
context = sampler_state.createContext(integrator, platform=platform)
timer.stop("Context Creation")
# Set pressure.
context.setParameter(parameter_name, thermodynamic_state.pressure)
# Run update.
# Each step of the DummyIntegrator triggers one Monte Carlo volume move attempt, since the barostat frequency was set to 1.
timer.start("step()")
integrator.step(self.nattempts)
timer.stop("step()")
# Get sampler state.
timer.start("update_sampler_state")
updated_sampler_state = SamplerState.createFromContext(context)
timer.stop("update_sampler_state")
# DEBUG
#print thermodynamics.volume(updated_sampler_state.box_vectors)
# Clean up.
del context
# Restore frequency of barostat.
if old_barostat_frequency:
force.setFrequency(old_barostat_frequency)
timer.report_timing()
# Return updated sampler state.
return updated_sampler_state
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
|
the-stack_106_30300 | """
Display one 4-D image layer using the add_image API
"""
from skimage import data
import napari
blobs = data.binary_blobs(
length=128, blob_size_fraction=0.05, n_dim=2, volume_fraction=0.25
).astype(float)
viewer = napari.view_image(blobs, name='blobs')
@viewer.bind_key('a')
def accept_image(viewer):
msg = 'this is a good image'
viewer.status = msg
print(msg)
show_next(viewer)
@viewer.bind_key('r')
def reject_image(viewer):
msg = 'this is a bad image'
viewer.status = msg
print(msg)
show_next(viewer)
def show_next(viewer):
blobs = data.binary_blobs(
length=128, blob_size_fraction=0.05, n_dim=2, volume_fraction=0.25
).astype(float)
viewer.layers[0].data = blobs
@napari.Viewer.bind_key('w')
def hello(viewer):
# on press
viewer.status = 'hello world!'
yield
# on release
viewer.status = 'goodbye world :('
# change viewer title
viewer.title = 'quality control images'
napari.run()
|
the-stack_106_30301 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
A bridge to publish and subscribe to EventAdmin events over the network using
MQTT
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import json
import logging
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, \
Validate, Invalidate, Requires
from pelix.utilities import to_str
import pelix.constants as constants
import pelix.misc.mqtt_client
import pelix.services as services
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
DEFAULT_MQTT_TOPIC = 'pelix/eventadmin'
""" Default MQTT topic to use to propagate events """
EVENT_PROP_SOURCE_UID = 'pelix.eventadmin.mqtt.source'
""" UID of the framework that sent the event """
EVENT_PROP_STARTING_SLASH = 'pelix.eventadmin.mqtt.start_slash'
""" Flag to indicate that the EventAdmin topic starts with a '/' """
# ------------------------------------------------------------------------------
@ComponentFactory(services.FACTORY_EVENT_ADMIN_MQTT)
@Provides(services.SERVICE_EVENT_HANDLER, '_controller')
@Requires('_event', services.SERVICE_EVENT_ADMIN)
@Property('_event_topics', services.PROP_EVENT_TOPICS, '*')
@Property('_host', 'mqtt.host', 'localhost')
@Property('_port', 'mqtt.port', 1883)
@Property('_mqtt_topic', 'mqtt.topic.prefix', DEFAULT_MQTT_TOPIC)
class MqttEventAdminBridge(object):
"""
The EventAdmin MQTT bridge
"""
def __init__(self):
"""
Sets up the members
"""
# MQTT configuration
self._host = "localhost"
self._port = 1883
self._mqtt_topic = None
# MQTT Client
self._mqtt = None
# EventAdmin
self._event = None
self._event_topics = None
# EventHandler service controller
self._controller = False
# Framework UID
self._framework_uid = None
@Validate
def _validate(self, context):
"""
Component validated
"""
# Store the framework UID
self._framework_uid = context.get_property(constants.FRAMEWORK_UID)
if not self._mqtt_topic:
# No topic given, use the default one
self._mqtt_topic = DEFAULT_MQTT_TOPIC
if self._mqtt_topic[-1] == '/':
# Remove trailing slash
self._mqtt_topic = self._mqtt_topic[:-1]
# Create the MQTT client
client_id = "pelix-eventadmin-{0}".format(self._framework_uid)
self._mqtt = pelix.misc.mqtt_client.MqttClient(client_id)
# Customize callbacks
self._mqtt.on_connect = self.__on_connect
self._mqtt.on_disconnect = self.__on_disconnect
self._mqtt.on_message = self.__on_message
# Do not provide the EventHandler service before being connected
self._controller = False
# Prepare the connection
self._mqtt.connect(self._host, self._port)
@Invalidate
def _invalidate(self, context):
"""
Component invalidated
"""
# Disconnect from the server (this stops the loop)
self._mqtt.disconnect()
# Clean up
self._framework_uid = None
self._mqtt = None
def _make_topic(self, suffix):
"""
Prepares a MQTT topic with the given suffix
:param suffix: Suffix to the MQTT bridge topic
:return: A MQTT topic
"""
return "{0}/{1}".format(self._mqtt_topic, suffix)
def __on_connect(self, client, rc):
"""
Client connected to the server
"""
if not rc:
# Connection is OK, subscribe to the topic
client.subscribe(self._make_topic("#"))
# Provide the service
self._controller = True
def __on_disconnect(self, client, rc):
"""
Client has been disconnected from the server
"""
# Disconnected: stop providing the service
self._controller = False
def __on_message(self, client, msg):
"""
A message has been received from a server
:param client: Client that received the message
:param msg: A MQTTMessage bean
"""
try:
self.handle_mqtt_message(msg.topic, msg.payload)
except Exception as ex:
_logger.exception("Error handling an MQTT EventAdmin message: %s",
ex)
def handle_event(self, topic, properties):
"""
An EventAdmin event has been received
"""
# Check that the event wasn't sent by us
if EVENT_PROP_SOURCE_UID in properties:
# A bridge posted this event, ignore it
return
elif services.EVENT_PROP_PROPAGATE not in properties:
# Propagation flag is not set, ignore
_logger.warning("No propagate")
return
# Remove starting '/' in the event, and set up the flag
if topic[0] == '/':
topic = topic[1:]
properties[EVENT_PROP_STARTING_SLASH] = True
# Prepare MQTT data
mqtt_topic = self._make_topic(topic)
payload = json.dumps(properties)
# Publish the event to everybody, with QOS 2
self._mqtt.publish(mqtt_topic, payload, qos=2)
def handle_mqtt_message(self, mqtt_topic, payload):
"""
An MQTT message has been received
:param mqtt_topic: MQTT message topic
:param payload: Payload of the message
"""
# +1 to ignore the joining slash (prefix => prefix/)
evt_topic = mqtt_topic[len(self._mqtt_topic) + 1:]
if not evt_topic:
# Empty EventAdmin topic
_logger.debug("Empty EventAdmin topic: %s", mqtt_topic)
return
try:
# Ensure that the payload is a string
payload = to_str(payload)
# Parse the event payload
properties = json.loads(payload)
except ValueError as ex:
# Oops...
_logger.error("Error parsing the payload of %s: %s", evt_topic, ex)
return
# Check framework UID of the sender
try:
sender_uid = to_str(properties[services.EVENT_PROP_FRAMEWORK_UID])
if sender_uid == self._framework_uid:
# Loop back
return
# Set up source UID as an extra property
properties[EVENT_PROP_SOURCE_UID] = sender_uid
except KeyError:
# Not sent by us... continue
pass
# Update the topic if necessary
if properties.pop(EVENT_PROP_STARTING_SLASH, False):
# Topic has a starting '/'
evt_topic = '/{0}'.format(evt_topic)
# Post the event
self._event.post(evt_topic, properties)
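# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original bundle): the EventAdmin <-> MQTT
# topic mapping used by the bridge above, shown as standalone helpers. The
# function names are hypothetical; they only reuse json, to_str and the module
# constants defined in this file.
# ------------------------------------------------------------------------------
def _demo_event_to_mqtt(prefix, evt_topic, properties):
    """Mirrors handle_event(): strips a leading '/' and records it in the payload."""
    if evt_topic.startswith('/'):
        evt_topic = evt_topic[1:]
        properties[EVENT_PROP_STARTING_SLASH] = True
    return "{0}/{1}".format(prefix, evt_topic), json.dumps(properties)

def _demo_mqtt_to_event(prefix, mqtt_topic, payload):
    """Mirrors handle_mqtt_message(): recovers the EventAdmin topic and its leading '/'."""
    evt_topic = mqtt_topic[len(prefix) + 1:]
    properties = json.loads(to_str(payload))
    if properties.pop(EVENT_PROP_STARTING_SLASH, False):
        evt_topic = '/{0}'.format(evt_topic)
    return evt_topic, properties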
|
the-stack_106_30302 | #!/usr/bin/env python
import os,sys
# import all necessary stuff
import osg
import osgDB
import osgViewer
import osgART
# just a convenience function
def createImageBackground(video):
layer = osgART.VideoLayer()
layer.setSize(video)
geode = osgART.VideoGeode(osgART.VideoGeode.USE_TEXTURE_2D, video)
osgART.addTexturedQuad(geode,video.s(),video.t())
layer.addChild(geode)
return layer
# create a root node
root = osg.Group()
# only use Python path
#osgDB.Registry.instance().setLibraryFilePathList(sys.path)
# create a viewer
viewer = osgViewer.Viewer()
# add some convenience handlers
viewer.addEventHandler(osgViewer.WindowSizeHandler())
viewer.addEventHandler(osgViewer.StatsHandler())
viewer.addEventHandler(osgViewer.ThreadingHandler())
viewer.addEventHandler(osgViewer.HelpHandler())
# set the scene root
viewer.setSceneData(root)
# preload plugins
video_id = osgART.PluginManager.instance().load("osgart_video_artoolkit2")
tracker_id = osgART.PluginManager.instance().load("osgart_tracker_artoolkit2")
# create a video source (move to settings)
video = osgART.Video.cast(osgART.PluginManager.instance().get(video_id))
# open the video
video.open()
# tracker
tracker = osgART.Tracker.cast(osgART.PluginManager.instance().get(tracker_id))
# create a calibration object
calibration = tracker.getOrCreateCalibration()
# load camera parameter
tracker.getOrCreateCalibration().load("data/camera_para.dat")
# initialize the tracker
tracker.setImage(video)
# add a tracker callback
osgART.TrackerCallback.addOrSet(root,tracker)
# create a marker
marker = tracker.addMarker("single;data/patt.hiro;80;0;0")
# set the marker active
marker.setActive(True)
# create a matrix transfrom utilised through osgART
ar_transform = osg.MatrixTransform()
# add the default chain of event handlers
osgART.attachDefaultEventCallbacks(ar_transform,marker)
# add a cube
ar_transform.addChild(osgART.testCube())
# need to set the renderbin > render this one last
ar_transform.getOrCreateStateSet().setRenderBinDetails(100, "RenderBin")
# create a video background
video_background = createImageBackground(video)
# set the renderbin on the video background > rendered first
video_background.getOrCreateStateSet().setRenderBinDetails(0, "RenderBin");
# create a camera from the calibration object
cam = calibration.createCamera();
# attach the AR scene and the video background
cam.addChild(ar_transform)
cam.addChild(video_background)
# add the camera to the root node
root.addChild(cam)
# start the video capture
video.start()
# let the viewer do the rest
viewer.run()
|
the-stack_106_30303 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import defaultdict, OrderedDict
from typing import Any, Dict, Generator, List, Optional, overload, Tuple, Union
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning import loops # import as loops to avoid circular imports
from pytorch_lightning.loops.batch import TrainingBatchLoop
from pytorch_lightning.loops.batch.training_batch_loop import _OUTPUTS_TYPE as _BATCH_OUTPUTS_TYPE
from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached, _v1_8_output_format
from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
from pytorch_lightning.trainer.progress import BatchProgress, SchedulerProgress
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.auto_restart import _collect_states_on_rank_zero_over_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
_OUTPUTS_TYPE = List[_BATCH_OUTPUTS_TYPE]
class TrainingEpochLoop(loops.Loop[_OUTPUTS_TYPE]):
"""Runs over all batches in a dataloader (one epoch).
Args:
min_steps: The minimum number of steps (batches) to process
max_steps: The maximum number of steps (batches) to process
"""
def __init__(self, min_steps: Optional[int] = None, max_steps: int = -1) -> None:
super().__init__()
if max_steps is None:
rank_zero_deprecation(
"Setting `max_steps = None` is deprecated in v1.5 and will no longer be supported in v1.7."
" Use `max_steps = -1` instead."
)
max_steps = -1
elif max_steps < -1:
raise MisconfigurationException(
f"`max_steps` must be a non-negative integer or -1 (infinite steps). You passed in {max_steps}."
)
self.min_steps = min_steps
self.max_steps = max_steps
self.batch_progress = BatchProgress()
self.scheduler_progress = SchedulerProgress()
self.batch_loop = TrainingBatchLoop()
self.val_loop = loops.EvaluationLoop(verbose=False)
self._results = _ResultCollection(training=True)
self._outputs: _OUTPUTS_TYPE = []
self._warning_cache = WarningCache()
# caches the loaded dataloader state until dataloader objects are available
self._dataloader_state_dict: Dict[str, Any] = {}
self._batches_that_stepped: int = 0
@property
def total_batch_idx(self) -> int:
"""Returns the current batch index (across epochs)"""
# use `ready` instead of `completed` in case this is accessed after `completed` has been increased
# but before the next `ready` increase
return self.batch_progress.total.ready - 1
@property
def batch_idx(self) -> int:
"""Returns the current batch index (within this epoch)"""
# use `ready` instead of `completed` in case this is accessed after `completed` has been increased
# but before the next `ready` increase
return self.batch_progress.current.ready - 1
@property
def global_step(self) -> int:
lightning_module = self.trainer.lightning_module
if lightning_module is None or lightning_module.automatic_optimization:
return self.batch_loop.optimizer_loop.optim_progress.optimizer_steps
return self.batch_loop.manual_loop.optim_step_progress.total.completed
@property
def _is_training_done(self) -> bool:
max_steps_reached = _is_max_limit_reached(self.global_step, self.max_steps)
return max_steps_reached or self._num_ready_batches_reached()
@property
def _is_validation_done(self) -> bool:
# when we are restarting we want to check whether the val loop has finished
return not self.restarting or self.val_loop.done
@property
def done(self) -> bool:
"""Evaluates when to leave the loop."""
return (self._is_training_done and self._is_validation_done) or self.trainer.should_stop
def connect( # type: ignore[override]
self,
batch_loop: Optional[TrainingBatchLoop] = None,
val_loop: Optional["loops.EvaluationLoop"] = None,
) -> None:
"""Optionally connect a custom batch or validation loop to this training epoch loop."""
if batch_loop is not None:
self.batch_loop = batch_loop
if val_loop is not None:
self.val_loop = val_loop
def reset(self) -> None:
"""Resets the internal state of the loop for a new run."""
if self.restarting:
self.batch_progress.reset_on_restart()
self.scheduler_progress.reset_on_restart()
self.batch_loop.optimizer_loop.optim_progress.reset_on_restart()
trainer = self.trainer
if not trainer.state._fault_tolerant_mode.is_enabled and trainer.num_training_batches != float("inf"):
expected_steps = math.ceil(trainer.num_training_batches / trainer.accumulate_grad_batches)
if self.global_step % expected_steps != 0:
rank_zero_warn(
"You're resuming from a checkpoint that ended before the epoch ended. This can cause unreliable"
" results if further training is done. Consider using an end-of-epoch checkpoint or enabling"
" fault-tolerant training:"
" https://pytorch-lightning.readthedocs.io/en/stable/advanced/fault_tolerant_training.html"
)
else:
self.batch_progress.reset_on_run()
self.scheduler_progress.reset_on_run()
self.batch_loop.optimizer_loop.optim_progress.reset_on_run()
# when the epoch starts, the total val batch progress should be reset as it's supposed to count the batches
# seen per epoch, this is useful for tracking when validation is run multiple times per epoch
self.val_loop.epoch_loop.batch_progress.total.reset()
self._outputs = []
def on_run_start(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[override]
self._reload_dataloader_state_dict(data_fetcher)
_ = iter(data_fetcher) # creates the iterator inside the fetcher
# add the previous `fetched` value to properly track `is_last_batch` with no prefetching
data_fetcher.fetched += self.batch_progress.current.ready
data_fetcher._start_profiler = self._on_before_fetch
data_fetcher._stop_profiler = self._on_after_fetch
def _on_before_fetch(self) -> None:
self.trainer.profiler.start(f"[{self.__class__.__name__}].train_dataloader_next")
def _on_after_fetch(self) -> None:
self.trainer.profiler.stop(f"[{self.__class__.__name__}].train_dataloader_next")
def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[override]
"""Runs a single training batch.
Raises:
StopIteration: When the epoch is canceled by the user returning -1
"""
if self.restarting and self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch):
# skip training and run validation in `on_advance_end`
return
# we are going to train first so the val loop does not need to restart
self.val_loop.restarting = False
if not isinstance(data_fetcher, DataLoaderIterDataFetcher):
batch_idx = self.batch_idx + 1
batch = next(data_fetcher)
else:
batch_idx, batch = next(data_fetcher)
self.batch_progress.is_last_batch = data_fetcher.done
kwargs = self._build_kwargs(OrderedDict(), batch, batch_idx)
self.batch_progress.increment_ready()
self.trainer._logger_connector.on_batch_start(batch, batch_idx)
if batch is None:
self._warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
batch_output = []
else:
# hook
self.trainer._call_callback_hooks("on_batch_start")
# hook
self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx)
response = self.trainer._call_lightning_module_hook("on_train_batch_start", batch, batch_idx)
self.trainer._call_strategy_hook("on_train_batch_start", batch, batch_idx)
if response == -1:
self.batch_progress.increment_processed()
raise StopIteration
self.batch_progress.increment_started()
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.batch_loop.run(kwargs)
self.batch_progress.increment_processed()
# update non-plateau LR schedulers
# update epoch-interval ones only when we are at the end of training epoch
self.update_lr_schedulers("step", update_plateau_schedulers=False)
if self._num_ready_batches_reached():
self.update_lr_schedulers("epoch", update_plateau_schedulers=False)
batch_end_outputs = self._prepare_outputs_training_batch_end(
batch_output,
lightning_module=self.trainer.lightning_module,
num_optimizers=len(self.trainer.optimizers),
)
self.trainer._call_callback_hooks("on_train_batch_end", batch_end_outputs, batch, batch_idx)
self.trainer._call_lightning_module_hook("on_train_batch_end", batch_end_outputs, batch, batch_idx)
self.trainer._call_callback_hooks("on_batch_end")
self.trainer._logger_connector.on_batch_end()
self.batch_progress.increment_completed()
if is_overridden("training_epoch_end", self.trainer.lightning_module):
self._outputs.append(batch_output)
# -----------------------------------------
# SAVE METRICS TO LOGGERS AND PROGRESS_BAR
# -----------------------------------------
self.trainer._logger_connector.update_train_step_metrics()
def on_advance_end(self) -> None:
# -----------------------------------------
# VALIDATE IF NEEDED
# -----------------------------------------
should_check_val = self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch)
if should_check_val:
self.trainer.validating = True
self._run_validation()
self.trainer.training = True
# update plateau LR scheduler after metrics are logged
self.update_lr_schedulers("step", update_plateau_schedulers=True)
if not self._should_accumulate():
# this is increased once per batch disregarding multiple optimizers or tbptt on purpose for loggers
self._batches_that_stepped += 1
# this will save based on the `batches_that_stepped` value
self._save_loggers_on_train_batch_end()
# if training finished, defer exit to the parent. this assumes there will be enough time in between
# which might not be the case depending on what's in the `*_epoch_end` hooks
if not self._is_training_done:
# if fault tolerant is enabled and process has been notified, exit.
self.trainer._exit_gracefully_on_signal()
def on_run_end(self) -> _OUTPUTS_TYPE:
outputs, self._outputs = self._outputs, []
return outputs
def teardown(self) -> None:
self._results.cpu()
self.batch_loop.teardown()
self.val_loop.teardown()
def on_save_checkpoint(self) -> Dict:
state_dict = super().on_save_checkpoint()
if (
self.trainer is not None
and self.trainer.state._fault_tolerant_mode.is_enabled
and self.trainer.train_dataloader is not None
and not self._num_completed_batches_reached() # did not finish
# TODO: fault-tolerance requires a minimum number of batches so probably should be > 0
and self.batch_progress.current.ready # did start
):
loader: CombinedLoader = self.trainer.train_dataloader
state = loader.state_dict(has_completed=self._has_completed())
if state:
state_dict["dataloader_state_dict"] = _collect_states_on_rank_zero_over_collection(state)
return state_dict
def on_load_checkpoint(self, state_dict: Dict) -> None:
# cache the dataloader state dict until the dataloader objects are available
self._dataloader_state_dict = state_dict.get("dataloader_state_dict")
def _run_validation(self) -> None:
# reload dataloaders
self.val_loop._reload_evaluation_dataloaders()
with torch.no_grad():
self.val_loop.run()
def _accumulated_batches_reached(self) -> bool:
"""Determine if accumulation will be finished by the end of the current batch."""
return self.batch_progress.current.ready % self.trainer.accumulate_grad_batches == 0
def _num_ready_batches_reached(self) -> bool:
"""Checks if we are in the last batch or if there are more batches to follow."""
epoch_finished_on_ready = self.batch_progress.current.ready == self.trainer.num_training_batches
return epoch_finished_on_ready or self.batch_progress.is_last_batch
def _num_completed_batches_reached(self) -> bool:
epoch_finished_on_completed = self.batch_progress.current.completed == self.trainer.num_training_batches
dataloader_consumed_successfully = self.batch_progress.is_last_batch and self._has_completed()
return epoch_finished_on_completed or dataloader_consumed_successfully
def _has_completed(self) -> bool:
return self.batch_progress.current.ready == self.batch_progress.current.completed
def _should_accumulate(self) -> bool:
"""Checks if the optimizer step should be performed or gradients should be accumulated for the current
step."""
accumulation_done = self._accumulated_batches_reached()
# Lightning steps on the final batch
is_final_batch = self._num_ready_batches_reached()
# but the strategy might not
strategy_accumulates_on_final_batch = self.trainer.strategy.handles_gradient_accumulation or not is_final_batch
return not accumulation_done and strategy_accumulates_on_final_batch
@staticmethod
def _prepare_outputs_training_batch_end(
batch_output: _BATCH_OUTPUTS_TYPE,
lightning_module: "pl.LightningModule",
num_optimizers: int,
) -> Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]:
"""Processes the outputs from the batch loop into the format passed to the ``on_train_batch_end`` hook."""
if not batch_output:
return []
# convert optimizer dicts to list
if lightning_module.automatic_optimization:
batch_output = apply_to_collection(
batch_output, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers
)
array = np.array(batch_output, dtype=object)
# TODO: remove in v1.8
if (
num_optimizers > 1
and lightning_module.truncated_bptt_steps > 0
and is_overridden("on_train_batch_end", lightning_module)
and not _v1_8_output_format(lightning_module.on_train_batch_end)
):
rank_zero_deprecation(
"You are training with multiple optimizers AND truncated backpropagation through time enabled."
" The current format of the `on_train_batch_end(outputs, ...)` is a 2d list with sizes"
" (n_optimizers, tbptt_steps), however, this has been deprecated and will change in version v1.8 to"
" (tbptt_steps, n_optimizers). You can update your code by adding the following parameter to your"
" hook signature: `on_train_batch_end(outputs, ..., new_format=True)`."
)
# (tbptt_steps, n_opt) -> (n_opt, tbptt_steps)
if array.ndim == 1:
array = np.expand_dims(array, 1)
array = array.transpose((1, 0))
# squeeze all single-element dimensions
array = array.squeeze()
array = array.tolist()
array = _recursive_unpad(array)
return array
@staticmethod
def _prepare_outputs_training_epoch_end(
batch_outputs: _OUTPUTS_TYPE,
lightning_module: "pl.LightningModule",
num_optimizers: int,
) -> Union[List[List[List[Dict[str, Any]]]], List[List[Dict[str, Any]]], List[Dict[str, Any]]]:
"""Processes the outputs from the batch loop into the format passed to the ``training_epoch_end`` hook."""
# `batch_outputs` (plural) is the same as `epoch_end_output` (singular)
if not batch_outputs:
return []
# convert optimizer dicts to list
if lightning_module.automatic_optimization:
batch_outputs = apply_to_collection(
batch_outputs, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers
)
array = _recursive_pad(batch_outputs)
# TODO: remove in v1.8
if (
num_optimizers > 1
and lightning_module.truncated_bptt_steps > 0
and not _v1_8_output_format(lightning_module.on_train_epoch_end)
):
rank_zero_deprecation(
"You are training with multiple optimizers AND truncated backpropagation through time enabled."
" The current format of the `training_epoch_end(outputs)` is a 3d list with sizes"
" (n_optimizers, n_batches, tbptt_steps), however, this has been deprecated and will change in version"
" v1.8 to (n_batches, tbptt_steps, n_optimizers). You can update your code by adding the following"
" parameter to your hook signature: `training_epoch_end(outputs, new_format=True)`."
)
# (n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps)
if array.ndim == 2:
array = np.expand_dims(array, 2)
array = array.transpose((2, 0, 1))
# squeeze all single-element dimensions
array = array.squeeze()
array = array.tolist()
array = _recursive_unpad(array)
# in case we squeezed from 1-element array to a 0-dim array
array = array if isinstance(array, list) else [array]
# remove residual empty lists
array = [item for item in array if not isinstance(item, list) or len(item)]
return array
def update_lr_schedulers(self, interval: str, update_plateau_schedulers: bool) -> None:
"""updates the lr schedulers based on the given interval."""
if interval == "step" and self._should_accumulate():
return
active_optimizers = _get_active_optimizers(
self.trainer.optimizers, self.trainer.optimizer_frequencies, self.total_batch_idx
)
self._update_learning_rates(
interval=interval,
update_plateau_schedulers=update_plateau_schedulers,
opt_indices=[opt_idx for opt_idx, _ in active_optimizers],
)
def _update_learning_rates(
self, interval: str, update_plateau_schedulers: bool, opt_indices: Optional[List[int]] = None
) -> None:
"""Update learning rates.
Args:
interval: either 'epoch' or 'step'.
update_plateau_schedulers: control whether ``ReduceLROnPlateau`` or non-plateau schedulers get updated.
This is used so non-plateau schedulers can be updated before running validation. Checkpoints are
commonly saved during validation, however, on-plateau schedulers might monitor a validation metric
so they have to be updated separately.
opt_indices: indices of the optimizers to update.
"""
if not self.trainer.lr_scheduler_configs or not self.trainer.lightning_module.automatic_optimization:
return
if opt_indices is None:
opt_indices = []
for config in self.trainer.lr_scheduler_configs:
if config.opt_idx not in opt_indices:
continue
if update_plateau_schedulers ^ config.reduce_on_plateau:
continue
current_idx = self.batch_idx if interval == "step" else self.trainer.current_epoch
current_idx += 1 # account for both batch and epoch starts from 0
# Take step if call to update_learning_rates matches the interval key and
# the current step modulo the schedulers frequency is zero
if config.interval == interval and current_idx % config.frequency == 0:
monitor_val = None
if config.reduce_on_plateau:
# If instance of ReduceLROnPlateau, we need a monitor
monitor_key = config.monitor
monitor_val = self._get_monitor_value(monitor_key)
if monitor_val is None:
if config.strict:
avail_metrics = list(self.trainer.callback_metrics)
raise MisconfigurationException(
f"ReduceLROnPlateau conditioned on metric {monitor_key}"
f" which is not available. Available metrics are: {avail_metrics}."
" Condition can be set using `monitor` key in lr scheduler dict"
)
rank_zero_warn(
f"ReduceLROnPlateau conditioned on metric {monitor_key}"
" which is not available but strict is set to `False`."
" Skipping learning rate update.",
category=RuntimeWarning,
)
continue
self.scheduler_progress.increment_ready()
# update LR
self.trainer._call_lightning_module_hook(
"lr_scheduler_step",
config.scheduler,
config.opt_idx,
monitor_val,
)
self.scheduler_progress.increment_completed()
def _get_monitor_value(self, key: str) -> Any:
# this is a separate method to aid in testing
return self.trainer.callback_metrics.get(key)
def _should_check_val_epoch(self):
return self.trainer.enable_validation and (
self.trainer.check_val_every_n_epoch is None
or (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
)
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool:
"""Decide if we should run validation."""
if not self._should_check_val_epoch():
return False
# val_check_batch is inf for iterable datasets with no length defined
is_infinite_dataset = self.trainer.val_check_batch == float("inf")
if is_last_batch and is_infinite_dataset:
return True
if self.trainer.should_stop:
return True
# TODO(@awaelchli): let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = is_last_batch
if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset:
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float("inf"):
# if `check_val_every_n_epoch is `None`, run a validation loop every n training batches
# else condition it based on the batch_idx of the current epoch
current_iteration = (
self._batches_that_stepped if self.trainer.check_val_every_n_epoch is None else batch_idx
)
is_val_check_batch = (current_iteration + 1) % self.trainer.val_check_batch == 0
return is_val_check_batch
def _save_loggers_on_train_batch_end(self) -> None:
"""Flushes loggers to disk."""
if self.trainer.should_stop:
for logger in self.trainer.loggers:
logger.save()
def _reload_dataloader_state_dict(self, data_fetcher: AbstractDataFetcher) -> None:
if self._dataloader_state_dict:
data_fetcher.dataloader.load_state_dict(self._dataloader_state_dict)
self._dataloader_state_dict = None
def _build_kwargs(self, kwargs: OrderedDict, batch: Any, batch_idx: int) -> OrderedDict:
"""Helper method to build the arguments for the current step.
Args:
kwargs: The kwargs passed down to the hooks.
batch: The current batch to run through the step.
batch_idx: The current batch idx.
Returns:
The kwargs passed down to the hooks.
"""
kwargs["batch"] = batch
training_step_fx = getattr(self.trainer.lightning_module, "training_step")
# the `batch_idx` is optional, however, when there's more than 1 argument we cannot differentiate whether the
# user wants the `batch_idx` or another key like `optimizer_idx` as we are not strict about the argument names
if is_param_in_hook_signature(training_step_fx, "batch_idx", min_args=2):
kwargs["batch_idx"] = batch_idx
return kwargs
def _convert_optim_dict(outs: Dict[int, Dict[str, Any]], num_optimizers: int) -> List[Optional[Dict[str, Any]]]:
"""Converts an optimizer dict to a list in which the key of the dict determines the position of the element.
Example::
>>> _convert_optim_dict({0: {"loss": 0.0}, 2: {"loss": 0.2}}, num_optimizers=3)
[{'loss': 0.0}, None, {'loss': 0.2}]
"""
return [outs[opt_idx] if opt_idx in outs else None for opt_idx in range(num_optimizers)]
@overload
def _recursive_unpad(nested: Any, value: Optional[Any] = None) -> Any:
...
@overload
def _recursive_unpad(nested: List[Any], value: Optional[Any] = None) -> List[Any]:
...
def _recursive_unpad(nested: Union[Any, List[Any]], value: Optional[Any] = None) -> Union[Any, List[Any]]:
"""Removes the given pad value from the nested list. Not strictly the reverse operation of
:func:`_recursive_pad` because it removes the padding element everywhere, not just from the end of a list.
Example::
>>> _recursive_unpad([[[0, 1, 0]], [2], [0, 0]], value=0)
[[[1]], [2], []]
"""
if not isinstance(nested, list):
return nested
return [_recursive_unpad(item, value) for item in nested if item != value]
def _recursive_pad(nested: List[Any], fill_value: Optional[Any] = None) -> np.array:
"""Pads a jagged nested list of lists with the given value such that a proper multi-dimensional array can be
formed with rectangular shape. The padding appends to the incomplete lists.
Example::
>>> _recursive_pad([[], [1], [2, 3], [4]], fill_value=0) # doctest: +NORMALIZE_WHITESPACE
array([[0, 0], [1, 0], [2, 3], [4, 0]], dtype=object)
"""
# code adapted from stackexchange:
# https://codereview.stackexchange.com/questions/222623/pad-a-ragged-multidimensional-array-to-rectangular-shape
dimensions = _get_max_shape(nested)
result = np.full(dimensions, fill_value, dtype=object)
for index, value in _iterate_nested_array(nested):
result[index] = value
return result
def _get_dimensions(array: List[Any], level: int = 0) -> Generator:
yield level, len(array)
if all(isinstance(row, list) for row in array):
for row in array:
yield from _get_dimensions(row, level + 1)
def _get_max_shape(array: List[Any]) -> List[int]:
"""Calculates the max size in each dimension of a jagged (non-rectangular) nested list of lists.
Example::
>>> _get_max_shape([[], [[1], [2]], []])
[3, 2, 1]
"""
dimensions = defaultdict(int)
for level, length in _get_dimensions(array):
dimensions[level] = max(dimensions[level], length)
return [value for _, value in sorted(dimensions.items())]
def _iterate_nested_array(array: List[Any], index: Tuple = ()) -> Generator:
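    # yields (index, leaf_list) pairs; the last index component is a slice so
    # `_recursive_pad` can assign each leaf row into the result array in one step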
if all(isinstance(item, list) for item in array):
for idx, row in enumerate(array):
yield from _iterate_nested_array(row, (*index, idx))
else: # final level
yield (*index, slice(len(array))), array
|
the-stack_106_30305 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=8,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
1000: 1e-2,
2000: 1e-3,
10000: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 1024
NUM_FILTERS = 50
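    # architecture: 1D conv over time (50 filters, stride 2) followed by three
    # dense ReLU layers of N units and a softplus output sized to the target sequence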
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 10,
'stride': 2,
'nonlinearity': rectify,
'W': Normal(std=1/sqrt(source.seq_length))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N * NUM_FILTERS)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.output_shape()[1] * source.output_shape()[2],
'W': Normal(std=1/sqrt(N)),
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
the-stack_106_30307 | '''
Created on Apr 10, 2017
@author: gpetrochenkov
'''
from flask import Flask, jsonify, request, Response
from dicttoxml import dicttoxml
from RestAPI.utilities import check_exists, validate_format
from RestAPI.service_workers import get_comma_sep_values, basin_chars
import os
from RestAPI.service_workers import delineate
app = Flask(__name__)
#--------------------------REST ENDPOINTS--------------------------
@app.route('/streamstatsservices/parameters.<string:format_type>',
methods=['POST'])
def basinCharacteristics(format_type):
'''Endpoint for basin characteristics availability and statistics'''
args_dict = {}
#validates output format
args_dict['format'] = validate_format(format_type, ['xml','json'])
#check for required parameters
#args_dict['rcode'] = check_exists('rcode', request.args)
#necessary for computing basin characteristics
if 'workspaceID' in request.args and request.args['workspaceID'] != '':
args_dict['workspaceID'] = str(request.args.get('workspaceID'))
#optional arguments
if 'group' in request.args:
args_dict['group'] = str(request.args.get('group'))
if 'includeparameters' in request.args:
args_dict['includeParameters'] = get_comma_sep_values(
str(request.args.get('includeparameters')))
data = basin_chars(data=args_dict)
if format_type == 'xml':
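        # dicttoxml calls item_func with the parent key to name each list item,
        # so children of <parameters> are emitted as <parameter> and <messages> as <message>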
def default_xml(x):
if x == 'parameters':
return 'parameter'
if x == 'messages':
return 'message'
return Response(dicttoxml({'parameters': data, 'messages': ['Count %d' % len(data)]}, item_func=default_xml), mimetype='text/xml')
else:
return jsonify(**{'parameters': data, 'messages': ['Count %d' % len(data)]})
@app.route('/streamstatsservices/watershed.<string:format_type>',
methods=['POST'])
def watershed(format_type):
'''Endpoint for watershed queries'''
args_dict = {}
#validates output format
args_dict['format'] = validate_format(format_type,
['xml','json', 'geojson'])
#Check if the required parameters entered
args_dict['crs'] = str(check_exists('crs', request.args))
if (('xlocation' not in request.args or 'ylocation' not in request.args) \
and 'workspaceID' not in request.args \
            and 'comID' not in request.args):
raise Exception('Need either both the xlocation and ylocation or \
the workspace ID')
#necessary for pour point watershed query
if 'xlocation' in request.args and 'ylocation' in request.args:
try:
args_dict['xlocation'] = float(request.args.get('xlocation'))
args_dict['ylocation'] = float(request.args.get('ylocation'))
except:
raise Exception('xlocation and/or ylocation not valid numbers')
#necessary for workspace ID watersehd query
if 'workspaceID' in request.args:
args_dict['workspaceID'] = str(request.args.get('workspaceID'))
    #necessary for pour point watershed query if x/y points not provided
if 'comID' in request.args:
args_dict['comID'] = str(request.args.get('comID'))
#optional arguments
if 'includeparameters' in request.args:
args_dict['includeParameters'] = get_comma_sep_values(
str(request.args.get('includeparameters')))
if 'includefeatures' in request.args:
args_dict['includeFeatures'] = get_comma_sep_values(
str(request.args.get('includefeatures')))
if 'simplify' in request.args:
args_dict['simplify'] = str(request.args.get('simplify')) == 'true'
data = delineate(data = args_dict)
if args_dict['format'] == 'xml':
def default_xml(x):
if x == 'features':
return 'feature'
if x == 'coordinates':
return 'ArrayOfDouble'
if x == 'ArrayOfDouble':
return 'double'
return Response(dicttoxml({'watershed': data}, item_func=default_xml), mimetype='text/xml')
else:
return jsonify(**data)
@app.route('/streamstatsservices/download', methods=["POST"])
def download():
args_dict = {}
#check for required parameters
check_exists('workspaceID', request.args)
#necessary for download query
args_dict['workspaceID'] = str(request.args.get('workspaceID'))
#optional arguments
#ASK about valid input: so far only a Zipped GDB or a SHAPE format
if 'format' in request.args:
args_dict['format'] = validate_format(str(request.args.get('format')),
['', 'SHAPE'])
# do_work(data = args_dict)
data = {"message": "success"}
return jsonify(**data)
@app.route('/streamstatsservices/flowstatistics.<string:format_type>',
methods=['POST'])
def flow_statistics(format_type):
args_dict = {}
#validates output format
args_dict['format'] = validate_format(format_type, ['json'])
#check for required parameters
args_dict['rcode'] = str(check_exists('rcode', request.args))
#necessary for computing flow characteristics
if 'workspaceID' in request.args:
args_dict['workspaceID'] = str(request.args.get('workspaceID'))
#optional arguments
    if 'includeflowtypes' in request.args:
args_dict['includeFlowTypes'] = get_comma_sep_values(
str(request.args.get('includeflowtypes')))
# do_work(data = args_dict)
data = {"message": "success"}
return jsonify(**data)
@app.route('/streamstatsservices/features.<string:format_type>',
methods=['POST'])
def features(format_type):
args_dict = {}
#validates output format
args_dict['format'] = validate_format(format_type, \
['xml','json', 'geojson'])
#check for required parameters
args_dict['workspaceID'] = str(check_exists('workspaceID', request.args))
#necessary to return features
if 'includefeatures' in request.args:
args_dict['includeFeatures'] = get_comma_sep_values(
str(request.args.get('includefeatures')))
else:
if format_type == 'geojson': raise Exception('Not valid output format'\
                                                       ' for availability')
#optional arguments
if 'simplify' in request.args:
args_dict['simplify'] = str(request.args.get('simplify')) == 'true'
if 'crs' in request.args:
args_dict['crs'] = str(request.args.get('crs'))
# do_work(data = args_dict)
data = {"message": "success"}
return jsonify(**data)
if __name__ == '__main__':
app.secret_key = os.urandom(24)
app.run()
|
the-stack_106_30309 | from pysondb import db
import asyncio
import time
class PysonManager:
def __init__(self):
self.data_batch = []
self.is_running = True
self.batch_sync_completed = False
self.db_connection = db.getDb("user_prox_chat_db.json")
async def initalize(self):
asyncio.create_task(self.sync_database())
def insert_data(self, data):
self.data_batch.append(data)
    def destroy(self):
        self.is_running = False
        # asyncio.sleep() only creates an un-awaited coroutine in this synchronous
        # method, so block with time.sleep() until the flush loop reports completion
        while not self.batch_sync_completed:
            time.sleep(1)
self.db_connection.close()
async def sync_database(self):
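        # background flush loop: every 10 seconds snapshot the pending batch,
        # bulk-insert it with addMany, then drop the written slice from the queue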
while(self.is_running):
if(len(self.data_batch) > 0):
entries = list(self.data_batch)
print(entries)
self.db_connection.addMany(entries)
del self.data_batch[:len(entries)]
await asyncio.sleep(10)
self.batch_sync_completed = True
|
the-stack_106_30310 |
def BasinStyle(lineColor="#fff",lineWidth=1,fill=False,fillColor="#f33"):
lineStyle = {
"type":"line",
"paint": {
"line-color":lineColor,
"line-width":lineWidth
},
}
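    # fill opacity is a Mapbox GL "case" expression: 0.5 while the feature's
    # hover state is set, otherwise fully transparent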
fillStyle = {
"type":"fill",
"paint":{
"fill-color":fillColor,
"fill-opacity":[
"case",
["boolean", ["feature-state", "hover"], False],
0.5, 0
]
}
}
style = [lineStyle]
if fill:
style.append(fillStyle)
return style
def SymbolStyle(iconName, textKey = "name", allowOverlap = False, selectedTextColor="#f33"):
return [
{
"type": "symbol",
"layout":{
"icon-image": iconName,
"text-field": ["get", textKey],
"text-size": 12,
"text-offset": [0, 1.25],
"text-anchor": "top",
"icon-allow-overlap": allowOverlap,
"text-allow-overlap": allowOverlap
},
"paint":{
"text-color": [
'case',
['boolean', ['feature-state', 'selected'], False], "#ff3",
"#fff"
],
"text-halo-color": "#000",
"text-halo-width":1,
"text-halo-blur": 3
}
}
]
def CircleStyle():
return [
{
"type": "line",
"paint": {
"line-color": "#f33",
"line-width": 2
}
}
]
def FlowPathStyle(lineWidth=1,hoverWidth=3,color="#fff",colorKey=None):
lineStyle = {
"type": "line",
"paint":{
"line-color": color,
"line-width": lineWidth
}
}
if hoverWidth is not None:
lineStyle["paint"]["line-width"] = [
'case',
['boolean', ['feature-state', 'hover'], False], hoverWidth,
['boolean', ['feature-state', 'selected'], False], hoverWidth,
lineWidth
]
if colorKey is not None:
lineStyle["paint"]["line-color"] = ["get",colorKey]
print(lineStyle)
return [lineStyle]
def SubbasinStyle(fillKey=None):
lineStyle = {
"type": "line",
"paint":{
"line-color": [
'case',
['boolean', ['feature-state', 'selected'], False], "#ff3",
"#fff"
],
"line-width": [
'case',
['boolean', ['feature-state', 'selected'], False], 2,
1
],
}
}
fillStyle = {
"type": "fill",
"paint":{
"fill-color": "#33f",
"fill-opacity": 0.5
}
}
if fillKey is not None:
fillStyle["paint"]["fill-color"] = ["get",fillKey]
return [fillStyle,lineStyle]
def LivingAreaStyle(lineWidth=4,lineColor="#ff3",fill=False):
lineStyle = {
"type": "line",
"paint": {
"line-color": lineColor,
"line-width": lineWidth
}
}
fillStyle = {
"type": "fill",
"paint": {
"fill-color": [
'case',
['boolean', ['feature-state', 'selected'], False], "#ff3",
['boolean', ['feature-state', 'hover'], False], "#f93",
"#fff"
],
"fill-opacity": 0.5
}
}
style = []
if fill:
style.append(fillStyle)
style.append(lineStyle)
return style
def AgricultureAreaStyle():
return [{
"type": "line",
"paint": {
"line-color": "#3f3",
"line-width": 4
}
}]
def IndustryAreaStyle():
return [
{
"type": "fill",
"paint": {
"fill-color": "#33f",
"fill-opacity": 0.5
}
},
{
"type": "line",
"paint": {
"line-color": [
'case',
['boolean', ['feature-state', 'selected'], False], "#ff3",
['boolean', ['feature-state', 'hover'], False], "#f93",
"#fff"
],
"line-width": 2
}
}
]
def StatisticAreaStyle():
return [{
"type": "line",
"paint": {
"line-color": "#f3f",
"line-width": 2
}
}]
def FloodStyle(fillKey=None):
lineStyle = {
"type": "line",
"paint":{
"line-color": "#fff",
"line-width": 1
}
}
fillStyle = {
"type": "fill",
"paint":{
"fill-color": "#33f",
"fill-opacity": 0.5
}
}
if fillKey is not None:
fillStyle["paint"]["fill-color"] = ["get",fillKey]
return [fillStyle,lineStyle] |
the-stack_106_30311 |
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
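# 'channels_first' backends (e.g. Theano) expect the channel axis right after the
# batch axis; 'channels_last' backends (e.g. TensorFlow) expect it last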
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# save model
model.save("model_mnist_cnn.h5") |
the-stack_106_30312 | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example promotes an experiment.
Promoting an experiment permanently applies all the experimental changes made to
its related ad groups, criteria and ads. To add an experiment, run
add_experiment.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
EXPERIMENT_ID = 'INSERT_EXPERIMENT_ID_HERE'
def main(client, experiment_id):
# Initialize appropriate service.
experiment_service = client.GetService('ExperimentService', version='v201601')
# Construct operations and promote experiment.
operations = [{
'operator': 'SET',
'operand': {
'id': experiment_id,
'status': 'PROMOTED'
}
}]
result = experiment_service.mutate(operations)
# Display results.
for experiment in result['value']:
print ('Experiment with name \'%s\' and id \'%s\' was promoted.'
% (experiment['name'], experiment['id']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, EXPERIMENT_ID)
|
the-stack_106_30314 | import collections
import time
from .cache import Cache
class _Link(object):
__slots__ = ('key', 'expire', 'next', 'prev')
def __init__(self, key=None, expire=None):
self.key = key
self.expire = expire
def __reduce__(self):
return _Link, (self.key, self.expire)
def unlink(self):
next = self.next
prev = self.prev
prev.next = next
next.prev = prev
class _Timer(object):
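    # wraps a timer callable; while used as a context manager, nested calls all
    # return the same "frozen" timestamp so one operation sees a consistent time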
def __init__(self, timer):
self.__timer = timer
self.__nesting = 0
def __call__(self):
if self.__nesting == 0:
return self.__timer()
else:
return self.__time
def __enter__(self):
if self.__nesting == 0:
self.__time = time = self.__timer()
else:
time = self.__time
self.__nesting += 1
return time
def __exit__(self, *exc):
self.__nesting -= 1
def __reduce__(self):
return _Timer, (self.__timer,)
def __getattr__(self, name):
return getattr(self.__timer, name)
class TTLCache(Cache):
"""LRU Cache implementation with per-item time-to-live (TTL) value."""
def __init__(self, maxsize, ttl, timer=time.time, missing=None,
getsizeof=None):
Cache.__init__(self, maxsize, missing, getsizeof)
self.__root = root = _Link()
root.prev = root.next = root
self.__links = collections.OrderedDict()
self.__timer = _Timer(timer)
self.__ttl = ttl
def __contains__(self, key):
try:
link = self.__links[key] # no reordering
except KeyError:
return False
else:
return not (link.expire < self.__timer())
def __getitem__(self, key, cache_getitem=Cache.__getitem__):
try:
link = self.__getlink(key)
except KeyError:
expired = False
else:
expired = link.expire < self.__timer()
if expired:
return self.__missing__(key)
else:
return cache_getitem(self, key)
def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
with self.__timer as time:
self.expire(time)
cache_setitem(self, key, value)
try:
link = self.__getlink(key)
except KeyError:
self.__links[key] = link = _Link(key)
else:
link.unlink()
link.expire = time + self.__ttl
link.next = root = self.__root
link.prev = prev = root.prev
prev.next = root.prev = link
def __delitem__(self, key, cache_delitem=Cache.__delitem__):
cache_delitem(self, key)
link = self.__links.pop(key)
link.unlink()
if link.expire < self.__timer():
raise KeyError(key)
def __iter__(self):
root = self.__root
curr = root.next
while curr is not root:
# "freeze" time for iterator access
with self.__timer as time:
if not (curr.expire < time):
yield curr.key
curr = curr.next
def __len__(self):
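        # entries expire in insertion order, so discount the run of already-expired
        # items at the head of the list from the raw link count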
root = self.__root
curr = root.next
time = self.__timer()
count = len(self.__links)
while curr is not root and curr.expire < time:
count -= 1
curr = curr.next
return count
def __setstate__(self, state):
self.__dict__.update(state)
root = self.__root
root.prev = root.next = root
for link in sorted(self.__links.values(), key=lambda obj: obj.expire):
link.next = root
link.prev = prev = root.prev
prev.next = root.prev = link
self.expire(self.__timer())
def __repr__(self, cache_repr=Cache.__repr__):
with self.__timer as time:
self.expire(time)
return cache_repr(self)
@property
def currsize(self):
with self.__timer as time:
self.expire(time)
return super(TTLCache, self).currsize
@property
def timer(self):
"""The timer function used by the cache."""
return self.__timer
@property
def ttl(self):
"""The time-to-live value of the cache's items."""
return self.__ttl
def expire(self, time=None):
"""Remove expired items from the cache."""
if time is None:
time = self.__timer()
root = self.__root
curr = root.next
links = self.__links
cache_delitem = Cache.__delitem__
while curr is not root and curr.expire < time:
cache_delitem(self, curr.key)
del links[curr.key]
next = curr.next
curr.unlink()
curr = next
def clear(self):
with self.__timer as time:
self.expire(time)
Cache.clear(self)
def get(self, *args, **kwargs):
with self.__timer:
return Cache.get(self, *args, **kwargs)
def pop(self, *args, **kwargs):
with self.__timer:
return Cache.pop(self, *args, **kwargs)
def setdefault(self, *args, **kwargs):
with self.__timer:
return Cache.setdefault(self, *args, **kwargs)
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used that
has not already expired.
"""
with self.__timer as time:
self.expire(time)
try:
key = next(iter(self.__links))
except StopIteration:
raise KeyError('%s is empty' % self.__class__.__name__)
else:
return (key, self.pop(key))
if hasattr(collections.OrderedDict, 'move_to_end'):
def __getlink(self, key):
value = self.__links[key]
self.__links.move_to_end(key)
return value
else:
def __getlink(self, key):
value = self.__links.pop(key)
self.__links[key] = value
return value
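# Illustrative usage (a sketch, not part of the original module):
#
#     cache = TTLCache(maxsize=128, ttl=60)
#     cache['answer'] = 42       # entry expires 60 seconds after insertion
#     cache.get('answer')        # -> 42 while fresh, None once expired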
|
the-stack_106_30316 | import os
import sys
from methods import detect_darwin_sdk_path
def is_active():
return True
def get_name():
return "OSX"
def can_build():
if sys.platform == "darwin" or ("OSXCROSS_ROOT" in os.environ):
return True
return False
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
return [
("osxcross_sdk", "OSXCross SDK version", "darwin14"),
("MACOS_SDK_PATH", "Path to the macOS SDK", ""),
EnumVariable("debug_symbols", "Add debugging symbols to release builds", "yes", ("yes", "no", "full")),
BoolVariable("separate_debug_symbols", "Create a separate file containing debugging symbols", False),
BoolVariable("use_ubsan", "Use LLVM/GCC compiler undefined behavior sanitizer (UBSAN)", False),
BoolVariable("use_asan", "Use LLVM/GCC compiler address sanitizer (ASAN))", False),
BoolVariable("use_tsan", "Use LLVM/GCC compiler thread sanitizer (TSAN))", False),
]
def get_flags():
return []
def configure(env):
## Build type
if env["target"] == "release":
if env["debug_symbols"] != "full":
if env["optimize"] == "speed": # optimize for speed (default)
env.Prepend(CCFLAGS=["-O3", "-fomit-frame-pointer", "-ftree-vectorize", "-msse2"])
else: # optimize for size
env.Prepend(CCFLAGS=["-Os", "-ftree-vectorize", "-msse2"])
if env["debug_symbols"] == "yes":
env.Prepend(CCFLAGS=["-g1"])
if env["debug_symbols"] == "full":
env.Prepend(CCFLAGS=["-g2"])
elif env["target"] == "release_debug":
if env["debug_symbols"] != "full":
if env["optimize"] == "speed": # optimize for speed (default)
env.Prepend(CCFLAGS=["-O2"])
else: # optimize for size
env.Prepend(CCFLAGS=["-Os"])
env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
if env["debug_symbols"] == "yes":
env.Prepend(CCFLAGS=["-g1"])
if env["debug_symbols"] == "full":
env.Prepend(CCFLAGS=["-g2"])
elif env["target"] == "debug":
env.Prepend(CCFLAGS=["-g3"])
env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
env.Prepend(LINKFLAGS=["-Xlinker", "-no_deduplicate"])
## Architecture
# Mac OS X no longer runs on 32-bit since 10.7 which is unsupported since 2014
# As such, we only support 64-bit
env["bits"] = "64"
## Compiler configuration
# Save this in environment for use by other modules
if "OSXCROSS_ROOT" in os.environ:
env["osxcross"] = True
if not "osxcross" in env: # regular native build
if env["arch"] == "arm64":
print("Building for macOS 10.15+, platform arm64.")
env.Append(CCFLAGS=["-arch", "arm64", "-mmacosx-version-min=10.15", "-target", "arm64-apple-macos10.15"])
env.Append(LINKFLAGS=["-arch", "arm64", "-mmacosx-version-min=10.15", "-target", "arm64-apple-macos10.15"])
else:
print("Building for macOS 10.9+, platform x86-64.")
env.Append(CCFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.9"])
env.Append(LINKFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.9"])
if env["macports_clang"] != "no":
mpprefix = os.environ.get("MACPORTS_PREFIX", "/opt/local")
mpclangver = env["macports_clang"]
env["CC"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/clang"
env["LINK"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/clang++"
env["CXX"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/clang++"
env["AR"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/llvm-ar"
env["RANLIB"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/llvm-ranlib"
env["AS"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/llvm-as"
env.Append(CPPDEFINES=["__MACPORTS__"]) # hack to fix libvpx MM256_BROADCASTSI128_SI256 define
else:
env["CC"] = "clang"
env["CXX"] = "clang++"
detect_darwin_sdk_path("osx", env)
env.Append(CCFLAGS=["-isysroot", "$MACOS_SDK_PATH"])
env.Append(LINKFLAGS=["-isysroot", "$MACOS_SDK_PATH"])
else: # osxcross build
root = os.environ.get("OSXCROSS_ROOT", 0)
basecmd = root + "/target/bin/x86_64-apple-" + env["osxcross_sdk"] + "-"
ccache_path = os.environ.get("CCACHE")
if ccache_path is None:
env["CC"] = basecmd + "cc"
env["CXX"] = basecmd + "c++"
else:
# there aren't any ccache wrappers available for OS X cross-compile,
# to enable caching we need to prepend the path to the ccache binary
env["CC"] = ccache_path + " " + basecmd + "cc"
env["CXX"] = ccache_path + " " + basecmd + "c++"
env["AR"] = basecmd + "ar"
env["RANLIB"] = basecmd + "ranlib"
env["AS"] = basecmd + "as"
env.Append(CPPDEFINES=["__MACPORTS__"]) # hack to fix libvpx MM256_BROADCASTSI128_SI256 define
if env["CXX"] == "clang++":
env.Append(CPPDEFINES=["TYPED_METHOD_BIND"])
env["CC"] = "clang"
env["LINK"] = "clang++"
if env["use_ubsan"] or env["use_asan"] or env["use_tsan"]:
env.extra_suffix += "s"
if env["use_ubsan"]:
env.Append(CCFLAGS=["-fsanitize=undefined"])
env.Append(LINKFLAGS=["-fsanitize=undefined"])
if env["use_asan"]:
env.Append(CCFLAGS=["-fsanitize=address"])
env.Append(LINKFLAGS=["-fsanitize=address"])
if env["use_tsan"]:
env.Append(CCFLAGS=["-fsanitize=thread"])
env.Append(LINKFLAGS=["-fsanitize=thread"])
## Dependencies
if env["builtin_libtheora"]:
if env["arch"] != "arm64":
env["x86_libtheora_opt_gcc"] = True
## Flags
env.Prepend(CPPPATH=["#platform/osx"])
env.Append(
CPPDEFINES=[
"OSX_ENABLED",
"UNIX_ENABLED",
"GLES_ENABLED",
"APPLE_STYLE_KEYS",
"COREAUDIO_ENABLED",
"COREMIDI_ENABLED",
"GL_SILENCE_DEPRECATION",
]
)
env.Append(
LINKFLAGS=[
"-framework",
"Cocoa",
"-framework",
"Carbon",
"-framework",
"OpenGL",
"-framework",
"AGL",
"-framework",
"AudioUnit",
"-framework",
"CoreAudio",
"-framework",
"CoreMIDI",
"-lz",
"-framework",
"IOKit",
"-framework",
"ForceFeedback",
"-framework",
"AVFoundation",
"-framework",
"CoreMedia",
"-framework",
"CoreVideo",
]
)
env.Append(LIBS=["pthread"])
|
the-stack_106_30317 | from django.contrib import messages
from django.shortcuts import render, HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from .forms import *
from .serializers import *
from django.views import generic
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import LoginView
from django.views import View
from django.utils.translation import gettext as _
from django.shortcuts import redirect
from rest_framework.decorators import action
from dal import autocomplete
from django.core import management
from django.db.models import Q
from rest_framework import viewsets,filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from .mixins import *
from .permissions import *
from django.conf import settings
import os
class CPEAutocomplete(LoginRequiredMixin,autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = CPE.objects.all().order_by('name')
if self.q:
qs = qs.filter(name__icontains=self.q)
return qs
class GroupsViewSet(LoginRequiredMixin,viewsets.ModelViewSet):
search_fields = ['id','name','description','slug']
filter_backends = (filters.SearchFilter,DjangoFilterBackend)
filterset_fields = ['id','name','description','slug']
serializer_class = GroupSerializer
lookup_field = ('slug')
def get_queryset(self):
return self.request.user.extension.groups.all()
def create(self, request):
serializer = GroupSerializer(data=request.data)
if serializer.is_valid() and self.get_permissions():
serializer.save(extension=request.user.extension)
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
@action(detail=True, methods=['get'])
def analyse_vulnerabilities(self, request, slug):
group = self.get_object()
group.analyse_vulnerabilities()
return Response(_("Group analysed!"))
def get_permissions(self):
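        # list/retrieve/create only require an authenticated user; updating or
        # deleting an existing group additionally requires group ownership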
if self.action in ['list','retrieve','create']:
self.permission_classes = [IsAuthenticated,]
elif self.action in ['update', 'partial_update','destroy']:
self.permission_classes = [IsGroupOwner,]
return super().get_permissions()
class AssetsViewSet(LoginRequiredMixin,viewsets.ModelViewSet):
search_fields = ['id','name','description','slug']
filter_backends = (filters.SearchFilter,DjangoFilterBackend)
filterset_fields = ['id','name','description','slug']
serializer_class = AssetSerializer
lookup_field = ('slug')
def get_queryset(self):
return self.request.user.extension.getassets().filter(group__slug=self.kwargs['group_slug'])
def create(self, request, group_slug):
group = request.user.extension.groups.get(slug=group_slug)
serializer = AssetSerializer(data=request.data)
if serializer.is_valid() and self.get_permissions():
serializer.save(group=group)
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
@action(detail=True, methods=['get'])
def analyse_vulnerabilities(self, request, group_slug, slug):
asset = self.get_object()
asset.analyse_vulnerabilities()
return Response(_("Asset analysed!"))
def get_permissions(self):
if self.action in ['list','retrieve','create']:
self.permission_classes = [IsGroupOwner,]
elif self.action in ['update', 'partial_update','destroy']:
self.permission_classes = [IsAssetOwner,]
return super().get_permissions()
class SoftwareViewSet(LoginRequiredMixin,viewsets.ModelViewSet):
search_fields = ['id','name','description','version','cpe','slug']
filter_backends = (filters.SearchFilter,DjangoFilterBackend)
filterset_fields = ['id','name','description','version','cpe','slug']
serializer_class = SoftwareSerializer
lookup_field = ('slug')
def get_queryset(self):
return self.request.user.extension.getsoftwares().filter(asset__slug=self.kwargs['asset_slug'])
def create(self, request, group_slug, asset_slug):
asset = request.user.extension.getassets().get(slug=asset_slug)
serializer = SoftwareSerializer(data=request.data)
if serializer.is_valid() and self.get_permissions():
serializer.save(asset=asset)
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
@action(detail=True, methods=['get'])
def analyse_vulnerabilities(self, request, group_slug, asset_slug, slug):
software = self.get_object()
software.analyse_vulnerabilities()
return Response(_("Software analysed!"))
def get_permissions(self):
if self.action in ['list','retrieve','create']:
self.permission_classes = [IsAssetOwner,]
elif self.action in ['update', 'partial_update','destroy']:
self.permission_classes = [IsSoftwareOwner,]
return super().get_permissions()
class VulnerabilityViewSet(LoginRequiredMixin,viewsets.ModelViewSet):
search_fields = ['id','name','description','score','severity','link','slug']
filter_backends = (filters.SearchFilter,DjangoFilterBackend)
filterset_fields = ['id','name','description','score','severity','link','slug']
lookup_field = ('slug')
serializer_class = VulnerabilitySerializer
http_method_names = ['get']
def get_queryset(self):
if self.kwargs.get('software_slug',None) :
return self.request.user.extension.getsoftwares().get(slug=self.kwargs['software_slug']).getvulnerabilities()
elif self.kwargs.get('asset_slug',None) :
return self.request.user.extension.getassets().get(slug=self.kwargs['asset_slug']).getvulnerabilities()
elif self.kwargs.get('group_slug') :
return self.request.user.extension.groups.get(slug=self.kwargs['group_slug']).getvulnerabilities()
def get_permissions(self):
if self.action in ['list','retrieve']:
self.permission_classes = [IsAuthenticated,]
return super().get_permissions()
class Login(LoginView):
template_name = 'Login.html'
class SignUp(SuccessMessageMixin, generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name = 'Registration.html'
success_message = "Your profile was created successfully"
def get(self, request):
if request.user.is_authenticated:
return HttpResponseRedirect('/')
else:
return super().get(request)
class Home(LoginRequiredMixin, View):
template_name='Home.html'
def get(self, request):
data = {'homenav' : True,'available_languages': settings.LANGUAGES}
return render(request, self.template_name, data)
class Groups(LoginRequiredMixin, View):
template_name='Groups.html'
def get(self, request):
groupform = GroupForm()
assetform = AssetForm(user=request.user)
data = {'groupnav' : True,'available_languages': settings.LANGUAGES,'groupform' : groupform,'assetform': assetform}
return render(request, self.template_name, data)
def post(self, request):
if "groupform" in request.POST:
groupform = GroupForm(request.POST)
if groupform.is_valid():
group_add = groupform.save(commit=False)
group_add.extension = request.user.extension
group_add.save()
messages.success(request, _("Group created !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid group form !"))
return HttpResponseRedirect(request.path_info)
elif "deletegroup" in request.POST:
group_list = request.POST.getlist('groupcheck')
for group in group_list:
group_to_delete = request.user.extension.groups.get(pk=group)
group_to_delete.delete()
if len(group_list) > 1:
messages.success(request, _("Successfully deleted "+str(len(group_list))+" groups !"))
else:
messages.success(request, _("Group deleted !"))
return HttpResponseRedirect(request.path_info)
elif "assetform" in request.POST:
assetform = AssetForm(request.POST,user=request.user)
if assetform.is_valid():
asset_add = assetform.save(commit=False)
asset_add.save()
messages.success(request, _("Asset created !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid asset form !"))
return HttpResponseRedirect(request.path_info)
elif "deleteasset" in request.POST:
asset_list = request.POST.getlist('assetcheck')
for asset in asset_list:
asset_to_delete = request.user.extension.getassets().get(pk=asset)
asset_to_delete.delete()
if len(asset_list) > 1:
messages.success(request, _("Successfully deleted "+str(len(asset_list))+" assets !"))
else:
messages.success(request, _("Asset deleted !"))
return HttpResponseRedirect(request.path_info)
class AdminPanel(StaffRequiredMixin, View):
template_name='Admin.html'
def get(self, request):
data = {'adminnav': True,'available_languages': settings.LANGUAGES}
return render(request, self.template_name, data)
class GroupProfile(LoginRequiredMixin, View):
template_name='GroupProfile.html'
def get(self, request, groupslug):
group = Group.objects.get(slug=groupslug)
if group in request.user.extension.groups.all():
assetform = AssetForm()
groupform = GroupForm(instance=group)
softwareform = SoftwareForm(group=group)
data = {'available_languages': settings.LANGUAGES, 'groupform': groupform, 'assetform': assetform, 'group': group, 'softwareform': softwareform}
return render(request, self.template_name, data)
else:
return HttpResponseRedirect(request.path_info)
def post(self, request, groupslug):
group = Group.objects.get(slug=groupslug)
if group in request.user.extension.groups.all():
if "groupform" in request.POST:
groupform = GroupForm(request.POST, instance=group)
if groupform.is_valid():
group_update = groupform.save()
messages.success(request, _("Group updated !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid group form !"))
return HttpResponseRedirect(request.path_info)
elif "assetform" in request.POST:
assetform = AssetForm(request.POST)
if assetform.is_valid():
asset_add = assetform.save(commit=False)
asset_add.group = group
asset_add.save()
messages.success(request, _("Asset added !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid asset form !"))
return HttpResponseRedirect(request.path_info)
elif "deleteasset" in request.POST:
asset_list = request.POST.getlist('assetcheck')
for asset in asset_list:
asset_to_delete = request.user.extension.getassets().get(pk=asset)
asset_to_delete.delete()
if len(asset_list) > 1:
messages.success(request, _("Successfully deleted "+str(len(asset_list))+" assets !"))
else:
messages.success(request, _("Asset deleted !"))
return HttpResponseRedirect(request.path_info)
elif "deletegroup" in request.POST:
group.delete()
messages.success(request, _("Group deleted !"))
return redirect('groups')
elif "softwareform" in request.POST:
softwareform = SoftwareForm(request.POST, group=group)
if softwareform.is_valid():
softwareform.save()
messages.success(request, _("Software added !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid software form !"))
return HttpResponseRedirect(request.path_info)
elif "deletesoftware" in request.POST:
software_list = request.POST.getlist('softwarecheck')
for software in software_list:
software_to_delete = request.user.extension.getsoftwares().get(pk=software)
software_to_delete.delete()
if len(software_list) > 1:
messages.success(request, _("Successfully deleted "+str(len(software_list))+" software !"))
else:
messages.success(request, _("Software deleted !"))
return HttpResponseRedirect(request.path_info)
elif "analysegroup" in request.POST:
group.analyse_vulnerabilities()
messages.success(request, _("Group analysed !"))
return HttpResponseRedirect(request.path_info)
class AssetProfile(LoginRequiredMixin, View):
template_name='AssetProfile.html'
def get(self, request, groupslug, assetslug):
group = Group.objects.get(slug=groupslug)
if group in request.user.extension.groups.all():
asset = Asset.objects.get(slug=assetslug)
assetform = AssetForm(instance=asset,user=request.user)
softwareform = SoftwareForm()
data = {'available_languages': settings.LANGUAGES, 'assetform': assetform, 'softwareform': softwareform, 'asset': asset}
return render(request, self.template_name, data)
else:
return HttpResponseRedirect(request.path_info)
def post(self, request, groupslug, assetslug):
group = Group.objects.get(slug=groupslug)
if group in request.user.extension.groups.all():
asset = Asset.objects.get(slug=assetslug)
if "assetform" in request.POST:
assetform = AssetForm(request.POST, instance=asset)
if assetform.is_valid():
asset_update = assetform.save()
messages.success(request, _("Asset updated !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid asset form !"))
return HttpResponseRedirect(request.path_info)
elif "softwareform" in request.POST:
softwareform = SoftwareForm(request.POST)
if softwareform.is_valid():
software_add = softwareform.save(commit=False)
software_add.asset = asset
software_add.save()
messages.success(request, _("Software added !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid software form !"))
return HttpResponseRedirect(request.path_info)
elif "deletesoftware" in request.POST:
software_list = request.POST.getlist('softwarecheck')
for software in software_list:
software_to_delete = request.user.extension.getsoftwares().get(pk=software)
software_to_delete.delete()
if len(software_list) > 1:
messages.success(request, _("Successfully deleted "+str(len(software_list))+" software !"))
else:
messages.success(request, _("Software deleted !"))
return HttpResponseRedirect(request.path_info)
elif "deleteasset" in request.POST:
asset.delete()
messages.success(request, _("Asset deleted !"))
return redirect('groupprofile', groupslug=group.slug)
elif "analyseasset" in request.POST:
asset.analyse_vulnerabilities()
messages.success(request, _("Asset analysed !"))
return HttpResponseRedirect(request.path_info)
class SoftwareProfile(LoginRequiredMixin, View):
template_name='SoftwareProfile.html'
def get(self, request, groupslug, assetslug, softwareslug):
group = Group.objects.get(slug=groupslug)
if group in request.user.extension.groups.all():
software = Software.objects.get(slug=softwareslug)
softwareform = SoftwareForm(instance=software,group=group)
data = {'available_languages': settings.LANGUAGES, 'softwareform': softwareform, 'software': software}
return render(request, self.template_name, data)
else:
return HttpResponseRedirect(request.path_info)
def post(self, request, groupslug, assetslug, softwareslug):
group = Group.objects.get(slug=groupslug)
if group in request.user.extension.groups.all():
software = Software.objects.get(slug=softwareslug)
if "softwareform" in request.POST:
softwareform = SoftwareForm(request.POST, instance=software)
if softwareform.is_valid():
software_update = softwareform.save()
messages.success(request, _("Software updated !"))
return HttpResponseRedirect(request.path_info)
else:
messages.warning(request, _("Invalid software form !"))
return HttpResponseRedirect(request.path_info)
elif "deletesoftware" in request.POST:
software.delete()
messages.success(request, _("Software deleted !"))
return redirect('assetprofile', groupslug=group.slug, assetslug=software.asset.slug)
elif "analysesoftware" in request.POST:
software.analyse_vulnerabilities()
messages.success(request, _("Software analysed !"))
return HttpResponseRedirect(request.path_info)
class VulnerabilityProfile(LoginRequiredMixin, View):
template_name='VulnerabilityProfile.html'
def get(self, request, vulnerabilityslug):
vulnerability = Vulnerability.objects.get(slug=vulnerabilityslug)
if vulnerability in request.user.extension.getvulnerabilities():
data = {'available_languages': settings.LANGUAGES,'vulnerability': vulnerability}
return render(request, self.template_name, data)
class Backup(StaffRequiredMixin, View):
template_name='Backup.html'
def get(self, request):
files = os.listdir('backups')
data = {'available_languages': settings.LANGUAGES, 'files' : files}
return render(request, self.template_name, data)
def post(self, request):
if "deletebackup" in request.POST:
backup_list = request.POST.getlist("backupcheck")
for backup in backup_list:
os.remove("backups"+os.path.sep+backup)
if len(backup_list) > 1:
messages.success(request, _("Successfully deleted "+str(len(backup_list))+" backups !"))
else:
messages.success(request, _("Backup deleted !"))
return HttpResponseRedirect(request.path_info)
elif "addbackup" in request.POST:
management.call_command('dbbackup')
messages.success(request, _("Backup added !"))
return HttpResponseRedirect(request.path_info)
elif "restorebackup" in request.POST:
backup_list = request.POST.getlist("backupcheck")
if len(backup_list)>1 or len(backup_list)==0 :
messages.warning(request, _("You must select only one backup to restore !"))
else :
backup_to_restore = backup_list[0]
management.call_command('dbrestore','--noinput','-i'+backup_to_restore)
messages.success(request, _("Backup restored !"))
return HttpResponseRedirect(request.path_info)
class Search(LoginRequiredMixin, View):
template_name='Search.html'
def get(self, request):
search = request.GET.get("search")
if search:
search = request.GET.get("search")
groups = request.user.extension.groups.filter(name__icontains=search)
vulnerabilities = request.user.extension.getvulnerabilities().filter(Q(name__icontains=search) | Q(created_at__icontains=search) | Q(updated_at__icontains=search))
assets = request.user.extension.getassets().filter(Q(name__icontains=search) | Q(created_at__icontains=search) | Q(updated_at__icontains=search))
softwares = request.user.extension.getsoftwares().filter(Q(name__icontains=search) | Q(created_at__icontains=search) | Q(updated_at__icontains=search))
data = {'available_languages': settings.LANGUAGES, 'groups' : groups, 'vulnerabilities' : vulnerabilities, 'assets' : assets, 'softwares' : softwares}
return render(request, self.template_name, data)
else :
messages.warning(request,_('You need to enter a search term !'))
return redirect("home")
|
the-stack_106_30320 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
BLOCK_COUNTS = [3, 4, 6, 3]
BLOCK_FILTERS = [256, 512, 1024, 2048]
BLOCK_FILTERS_INNER = [64, 128, 256, 512]
class ResnetBuilder(object):
def __init__(self, weight_regularizer, trainable=True, training=True, channel_last=False, fuse_bn_relu=True, fuse_bn_add_relu=True):
self.data_format = "NHWC" if channel_last else "NCHW"
self.weight_initializer = flow.variance_scaling_initializer(2, 'fan_in', 'random_normal',
data_format=self.data_format)
self.weight_regularizer = weight_regularizer
self.trainable = trainable
self.training = training
self.fuse_bn_relu = fuse_bn_relu
self.fuse_bn_add_relu = fuse_bn_add_relu
def _conv2d(
self,
name,
input,
filters,
kernel_size,
strides=1,
padding="SAME",
dilations=1,
):
# There are different shapes of weight metric between 'NCHW' and 'NHWC' mode
if self.data_format == "NHWC":
shape = (filters, kernel_size, kernel_size, input.shape[3])
else:
shape = (filters, input.shape[1], kernel_size, kernel_size)
weight = flow.get_variable(
name + "-weight",
shape=shape,
dtype=input.dtype,
initializer=self.weight_initializer,
regularizer=self.weight_regularizer,
model_name="weight",
trainable=self.trainable,
)
return flow.nn.conv2d(input, weight, strides, padding, self.data_format, dilations, name=name)
def _batch_norm(self, inputs, name=None, last=False):
initializer = flow.zeros_initializer() if last else flow.ones_initializer()
axis = 1
if self.data_format =="NHWC":
axis = 3
return flow.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=0.9, # 97,
epsilon=1e-5,
center=True,
scale=True,
trainable=self.trainable,
training=self.training,
gamma_initializer=initializer,
moving_variance_initializer=initializer,
gamma_regularizer=self.weight_regularizer,
beta_regularizer=self.weight_regularizer,
name=name,
)
def _batch_norm_relu(self, inputs, name=None, last=False):
if self.fuse_bn_relu:
initializer = flow.zeros_initializer() if last else flow.ones_initializer()
axis = 1
if self.data_format =="NHWC":
axis = 3
return flow.layers.batch_normalization_relu(
inputs=inputs,
axis=axis,
momentum=0.9,
epsilon=1e-5,
center=True,
scale=True,
trainable=self.trainable,
training=self.training,
gamma_initializer=initializer,
moving_variance_initializer=initializer,
gamma_regularizer=self.weight_regularizer,
beta_regularizer=self.weight_regularizer,
name=name + "_bn_relu",
)
else:
return flow.nn.relu(self._batch_norm(inputs, name + "_bn", last=last))
def _batch_norm_add_relu(self, inputs, addend, name=None, last=False):
if self.fuse_bn_add_relu:
initializer = flow.zeros_initializer() if last else flow.ones_initializer()
axis = 1
if self.data_format =="NHWC":
axis = 3
return flow.layers.batch_normalization_add_relu(
inputs=inputs,
addend=addend,
axis=axis,
momentum=0.9,
epsilon=1e-5,
center=True,
scale=True,
trainable=self.trainable,
training=self.training,
gamma_initializer=initializer,
moving_variance_initializer=initializer,
gamma_regularizer=self.weight_regularizer,
beta_regularizer=self.weight_regularizer,
name=name+"_bn_add_relu",
)
else:
return flow.nn.relu(self._batch_norm(inputs, name+"_bn", last=last) + addend)
def conv2d_affine(self, input, name, filters, kernel_size, strides):
padding = "SAME" if strides > 1 or kernel_size > 1 else "VALID"
output = self._conv2d(name, input, filters, kernel_size, strides, padding)
return output
def bottleneck_transformation(self, input, block_name, filters, filters_inner, strides):
a = self.conv2d_affine(
input, block_name + "_branch2a", filters_inner, 1, 1)
a = self._batch_norm_relu(a, block_name + "_branch2a")
b = self.conv2d_affine(
a, block_name + "_branch2b", filters_inner, 3, strides)
b = self._batch_norm_relu(b, block_name + "_branch2b")
c = self.conv2d_affine(b, block_name + "_branch2c", filters, 1, 1)
return c
def residual_block(self, input, block_name, filters, filters_inner, strides_init):
if strides_init != 1 or block_name == "res2_0":
shortcut = self.conv2d_affine(
input, block_name + "_branch1", filters, 1, strides_init
)
shortcut = self._batch_norm(shortcut, block_name + "_branch1_bn")
else:
shortcut = input
bottleneck = self.bottleneck_transformation(
input, block_name, filters, filters_inner, strides_init,
)
return self._batch_norm_add_relu(bottleneck, shortcut, block_name + "_branch2c", last=True)
def residual_stage(self, input, stage_name, counts, filters, filters_inner, stride_init=2):
output = input
for i in range(counts):
block_name = "%s_%d" % (stage_name, i)
output = self.residual_block(
output, block_name, filters, filters_inner, stride_init if i == 0 else 1
)
return output
def resnet_conv_x_body(self, input):
output = input
for i, (counts, filters, filters_inner) in enumerate(
zip(BLOCK_COUNTS, BLOCK_FILTERS, BLOCK_FILTERS_INNER)
):
stage_name = "res%d" % (i + 2)
output = self.residual_stage(
output, stage_name, counts, filters, filters_inner, 1 if i == 0 else 2
)
return output
def resnet_stem(self, input):
conv1 = self._conv2d("conv1", input, 64, 7, 2)
conv1_bn = self._batch_norm_relu(conv1, "conv1")
pool1 = flow.nn.max_pool2d(
conv1_bn, ksize=3, strides=2, padding="SAME", data_format=self.data_format, name="pool1",
)
return pool1
def resnet50(images, args, trainable=True, training=True):
weight_regularizer = flow.regularizers.l2(args.wd) if args.wd > 0.0 and args.wd < 1.0 else None
builder = ResnetBuilder(weight_regularizer, trainable, training, args.channel_last, args.fuse_bn_relu, args.fuse_bn_add_relu)
if args.pad_output:
if args.channel_last:
paddings = ((0, 0), (0, 0), (0, 0), (0, 1))
else:
paddings = ((0, 0), (0, 1), (0, 0), (0, 0))
images = flow.pad(images, paddings=paddings)
with flow.scope.namespace("Resnet"):
stem = builder.resnet_stem(images)
body = builder.resnet_conv_x_body(stem)
pool5 = flow.nn.avg_pool2d(
body, ksize=7, strides=1, padding="VALID", data_format=builder.data_format, name="pool5",
)
fc8 = flow.layers.dense(
flow.reshape(pool5, (pool5.shape[0], -1)),
units=8, #class_num
use_bias=True,
kernel_initializer=flow.variance_scaling_initializer(2, 'fan_in', 'random_normal'),
bias_initializer=flow.zeros_initializer(),
kernel_regularizer=weight_regularizer,
bias_regularizer=weight_regularizer,
trainable=trainable,
name="fc8",
)
return body,fc8
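# Minimal invocation sketch (assumption: the legacy lazy-mode OneFlow API used above).
# `args` only needs the attributes read by resnet50(): wd, channel_last, fuse_bn_relu,
# fuse_bn_add_relu and pad_output.
#
#   import argparse
#   args = argparse.Namespace(wd=1.0 / 32768, channel_last=False, fuse_bn_relu=True,
#                             fuse_bn_add_relu=True, pad_output=False)
#   body, logits = resnet50(images, args, trainable=True, training=True)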
|
the-stack_106_30321 | import unittest
import pysal.examples as ex
import os
class Example_Tester(unittest.TestCase):
def test_get_path(self):
pathparts = os.path.normpath(ex.get_path('')).split(os.path.sep)
self.localpath = os.path.join(*pathparts[-2:])
        self.assertEqual(self.localpath, os.path.normpath('pysal/examples/'))
def test_parser(self):
for example in ex.available():
self.extext = ex.explain(example)
# make sure names are returned
self.assertIsNotNone(self.extext['name'])
# Make sure there's an empty line between titling and description
if self.extext['description'] is not None:
self.assertNotIn('----', self.extext['description'])
if __name__ == '__main__':
unittest.main()
|
the-stack_106_30322 | import rospy
from geometry_msgs.msg import Twist, PoseStamped
from tf2_ros import TransformListener, Buffer
import sys
def local_pose_callback(data):
global local_pose
local_pose = data
if __name__ == '__main__':
vehicle_type = sys.argv[1]
vehicle_id = sys.argv[2]
rospy.init_node(vehicle_type+'_'+vehicle_id+'_precision_landing')
tfBuffer = Buffer()
tflistener = TransformListener(tfBuffer)
cmd_vel_enu = Twist()
local_pose = PoseStamped()
Kp = 1.0
land_vel = 0.5
rospy.Subscriber(vehicle_type+'_'+vehicle_id+"/mavros/local_position/pose", PoseStamped, local_pose_callback,queue_size=1)
cmd_vel_pub = rospy.Publisher('/xtdrone/'+vehicle_type+'_'+vehicle_id+'/cmd_vel_enu', Twist, queue_size=1)
rate = rospy.Rate(50)
while not rospy.is_shutdown():
try:
tfstamped = tfBuffer.lookup_transform('tag_'+vehicle_id, 'map', rospy.Time(0))
        except Exception:  # transform not available yet, try again on the next cycle
continue
# print('tf:',tfstamped.transform.translation.x)
# print(local_pose.pose.position.x)
cmd_vel_enu.linear.x = Kp * (tfstamped.transform.translation.x - local_pose.pose.position.x)
cmd_vel_enu.linear.y = Kp * (tfstamped.transform.translation.y - local_pose.pose.position.y)
cmd_vel_enu.linear.z = -land_vel
# print(cmd_vel_enu)
cmd_vel_pub.publish(cmd_vel_enu)
rate.sleep()
|
the-stack_106_30326 | # privates.py
import sys
import itertools
class PrivateAccessError(Exception):
pass
class PrivateDataMetaclass(type):
def __new__(metacls,name,bases,dct):
function = type(lambda x:x)
privates = set(dct.get('__private__',()))
codes = set()
for val in dct.values():
if isinstance(val,function):
codes.add(val.__code__)
getframe = sys._getframe
count = itertools.count
def __getattribute__(self,attr):
if attr in privates:
for i in count(1):
code = getframe(i).f_code
if code in codes:
break
if code.co_name != '__getattribute__':
raise PrivateAccessError(
"attribute '%s' is private" % attr)
return super(cls,self).__getattribute__(attr)
def __setattr__(self,attr,val):
if attr in privates:
for i in count(1):
code = getframe(i).f_code
if code in codes:
break
if code.co_name != '__setattr__':
raise PrivateAccessError(
"attribute '%s' is private" % attr)
return super(cls,self).__setattr__(attr,val)
dct['__getattribute__'] = __getattribute__
dct['__setattr__'] = __setattr__
cls = type.__new__(metacls,name,bases,dct)
return cls
# And now for a few tests
import traceback
class A(object, metaclass=PrivateDataMetaclass):
__private__ = ['internal']
def __init__(self,n):
self.internal = n
def inc(self):
self.internal += 1
def res(self):
return self.internal
class B(A):
__private__ = ['internal2']
def __init__(self,n,m):
super(B,self).__init__(n)
self.internal2 = m
def inc(self):
super(B,self).inc()
self.internal2 += 2
def res(self):
return self.internal2 + super(B,self).res()
def bad(self):
return self.internal2 + self.internal
a = A(1)
a.inc()
print("Should print 2:")
print(a.res())
print()
print("Should raise PrivateAccessError:")
try:
print(a.internal)
except PrivateAccessError:
traceback.print_exc()
print()
b = B(1,1)
b.inc()
print("Should print 5:")
print(b.res())
print()
print("Should raise PrivateAccessError:")
try:
print(b.internal2)
except PrivateAccessError:
traceback.print_exc()
print()
print("Should raise PrivateAccessError:")
try:
print(b.bad())
except PrivateAccessError:
traceback.print_exc()
print()
|
the-stack_106_30330 | #!/usr/bin/env python3
import argparse
import os
import shutil
import tempfile
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from pico8.build import build
from pico8.game import game
from pico8.lua import lua
class TestDoBuild(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.cwd = os.getcwd()
os.chdir(self.tempdir)
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.tempdir)
def testErrorOutputFilenameHasWrongExtension(self):
args = argparse.Namespace(filename='foo.xxx')
self.assertEqual(1, build.do_build(args))
def testErrorInputFileDoesNotExist(self):
args = argparse.Namespace(lua='doesnotexist.p8', filename='foo.p8')
self.assertEqual(1, build.do_build(args))
def testErrorInputFileHasWrongExtension(self):
open('in.xxx', 'wb').close()
self.assertTrue(os.path.exists('in.xxx'))
args = argparse.Namespace(lua='in.xxx', filename='foo.p8')
self.assertEqual(1, build.do_build(args))
def testErrorBothInputAndEmptyArgsSpecified(self):
open('in.p8', 'wb').close()
self.assertTrue(os.path.exists('in.p8'))
args = argparse.Namespace(lua='in.p8', empty_lua=True,
filename='foo.p8')
self.assertEqual(1, build.do_build(args))
def testBuildCreatesEmptyDefault(self):
args = argparse.Namespace(filename='foo.p8')
self.assertEqual(0, build.do_build(args))
self.assertTrue(os.path.exists('foo.p8'))
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__gfx__\n00000000', txt)
def testBuildOverwritesExisting(self):
output_cart = game.Game.make_empty_game('foo.p8')
output_cart.gfx.set_sprite(0, [[1, 0, 1], [0, 1, 0], [1, 0, 1]])
output_cart.gff.set_flags(0, 7)
with open('foo.p8', 'wb') as outfh:
output_cart.to_p8_file(outfh, filename='foo.p8')
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__gfx__\n10100000', txt)
self.assertIn(b'__gff__\n07000000', txt)
input_cart = game.Game.make_empty_game('in.p8')
input_cart.gfx.set_sprite(0, [[2, 0, 2], [0, 2, 0], [2, 0, 2]])
with open('in.p8', 'wb') as outfh:
input_cart.to_p8_file(outfh, filename='in.p8')
args = argparse.Namespace(gfx='in.p8', filename='foo.p8')
self.assertEqual(0, build.do_build(args))
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__gfx__\n20200000', txt)
self.assertIn(b'__gff__\n07000000', txt)
def testBuildLuaFromP8(self):
output_cart = game.Game.make_empty_game('foo.p8')
output_cart.lua = lua.Lua.from_lines([b'print("zzz")'], version=8)
with open('foo.p8', 'wb') as outfh:
output_cart.to_p8_file(outfh, filename='foo.p8')
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__lua__\nprint("zzz")\n', txt)
input_cart = game.Game.make_empty_game('in.p8')
input_cart.lua = lua.Lua.from_lines([b'print("hi")'], version=8)
with open('in.p8', 'wb') as outfh:
input_cart.to_p8_file(outfh, filename='in.p8')
args = argparse.Namespace(lua='in.p8', filename='foo.p8')
self.assertEqual(0, build.do_build(args))
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__lua__\nprint("hi")\n', txt)
def testBuildLuaFromLuaFile(self):
output_cart = game.Game.make_empty_game('foo.p8')
output_cart.lua = lua.Lua.from_lines([b'print("zzz")'], version=8)
with open('foo.p8', 'wb') as outfh:
output_cart.to_p8_file(outfh, filename='foo.p8')
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__lua__\nprint("zzz")\n', txt)
with open('in.lua', 'wb') as outfh:
outfh.write(b'print("hi")')
args = argparse.Namespace(lua='in.lua', filename='foo.p8')
self.assertEqual(0, build.do_build(args))
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__lua__\nprint("hi")\n', txt)
def testBuildEmptiesSection(self):
output_cart = game.Game.make_empty_game('foo.p8')
output_cart.gfx.set_sprite(0, [[1, 0, 1], [0, 1, 0], [1, 0, 1]])
output_cart.gff.set_flags(0, 7)
with open('foo.p8', 'wb') as outfh:
output_cart.to_p8_file(outfh, filename='foo.p8')
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__gfx__\n10100000', txt)
self.assertIn(b'__gff__\n07000000', txt)
args = argparse.Namespace(empty_gfx=True, filename='foo.p8')
self.assertEqual(0, build.do_build(args))
with open('foo.p8', 'rb') as infh:
txt = infh.read()
self.assertIn(b'__gfx__\n00000000', txt)
self.assertIn(b'__gff__\n07000000', txt)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_30331 | #coding:utf-8
#
# id: bugs.gh_6740
# title: Allow parenthesized query expression for standard-compliance
# decription:
# https://github.com/FirebirdSQL/firebird/issues/6740
#
# NOTE. Queries which do not use the `WITH` clause can now be enclosed in parentheses,
# but this reduces the maximum number of parts of a UNIONed query from 255 to 128.
#
# Checked on 5.0.0.88.
#
# tracker_id:
# min_versions: ['5.0']
# versions: 5.0
# qmid: None
import pytest
from firebird.qa import db_factory, python_act, Action
# version: 5.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# # NB! Max limit of unioned-parts is 128 rather than 255!
# ########################
# NUM_OF_UNIONED_PARTS=128
# ########################
#
# unioned_query = '('
# for i in range(0,NUM_OF_UNIONED_PARTS):
# unioned_query = ''.join( (unioned_query, 'select %d ' % (i+1) + ('as i ' if i==0 else '') + 'from rdb$database') )
# if i < NUM_OF_UNIONED_PARTS-1:
# unioned_query = ''.join( (unioned_query, ' union all (') )
#
# unioned_query = ''.join( (unioned_query, ')' * NUM_OF_UNIONED_PARTS) )
# unioned_query += ';'
#
# #print(unioned_query)
#
# cur = db_conn.cursor()
# cur.execute(unioned_query)
# total = 0
# for r in cur:
# total += r[0]
# cur.close()
# print(total)
#---
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
8256
"""
@pytest.mark.version('>=5.0')
@pytest.mark.xfail
def test_1(act_1: Action):
pytest.fail("Test not IMPLEMENTED")
|
the-stack_106_30332 | # The following comments couldn't be translated into the new config version:
# eg to write payload to the oracle database
# replace CondDBCommon.connect = "oracle://cms_orcoff_int2r/CMS_COND_CSC"
# Database output service
import FWCore.ParameterSet.Config as cms
process = cms.Process("ProcessOne")
#PopCon config
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.connect = cms.string("sqlite_file:CSCChamberMapValues_20X.db")
process.MessageLogger = cms.Service("MessageLogger",
cout = cms.untracked.PSet(
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
#change the firstRun if you want a different IOV
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
logconnect = cms.untracked.string('sqlite_file:chambermap20X.db'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('CSCChamberMapRcd'),
tag = cms.string('CSCChamberMap')
))
)
process.WriteInDB = cms.EDAnalyzer("CSCChamberMapPopConAnalyzer",
SinceAppendMode = cms.bool(True),
record = cms.string('CSCChamberMapRcd'),
loggingOn = cms.untracked.bool(True),
Source = cms.PSet(
)
)
process.p = cms.Path(process.WriteInDB)
|
the-stack_106_30333 | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--use_wandb', action='store_true', help='use wandb')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
    def print_options(self, opt):
        """Print a short status message.
        The upstream implementation prints current options and default values (if different)
        and saves them into a text file / [checkpoints_dir] / opt.txt; here it is reduced to
        a single notification that the final images will be available shortly.
        """
        print("The final images will be loaded shortly...")
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
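# Typical usage sketch (assumption: a subclass such as TrainOptions/TestOptions defines
# self.isTrain and is called from the training/test scripts, as in the upstream
# CycleGAN/pix2pix project):
#
#   opt = TrainOptions().parse()   # parses CLI args, sets opt.isTrain, opt.gpu_ids, ...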
|
the-stack_106_30335 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
# @ak._v2._connect.numpy.implements("prod")
def prod(array, axis=None, keepdims=False, mask_identity=False, flatten_records=False):
"""
Args:
array: Array-like data (anything #ak.to_layout recognizes).
axis (None or int): If None, combine all values from the array into
a single scalar result; if an int, group by that axis: `0` is the
outermost, `1` is the first level of nested lists, etc., and
negative `axis` counts from the innermost: `-1` is the innermost,
`-2` is the next level up, etc.
keepdims (bool): If False, this reducer decreases the number of
dimensions by 1; if True, the reduced values are wrapped in a new
length-1 dimension so that the result of this operation may be
broadcasted with the original array.
mask_identity (bool): If True, reducing over empty lists results in
None (an option type); otherwise, reducing over empty lists
results in the operation's identity.
flatten_records (bool): If True, axis=None combines fields from different
records; otherwise, records raise an error.
Multiplies elements of `array` (many types supported, including all
Awkward Arrays and Records). The identity of multiplication is `1` and it
is usually not masked. This operation is the same as NumPy's
[prod](https://docs.scipy.org/doc/numpy/reference/generated/numpy.prod.html)
if all lists at a given dimension have the same length and no None values,
but it generalizes to cases where they do not.
See #ak.sum for a more complete description of nested list and missing
value (None) handling in reducers.
See also #ak.nanprod.
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.prod",
dict(
array=array,
axis=axis,
keepdims=keepdims,
mask_identity=mask_identity,
flatten_records=flatten_records,
),
):
return _impl(array, axis, keepdims, mask_identity, flatten_records)
# @ak._v2._connect.numpy.implements("nanprod")
def nanprod(
array, axis=None, keepdims=False, mask_identity=False, flatten_records=False
):
"""
Args:
array: Array-like data (anything #ak.to_layout recognizes).
axis (None or int): If None, combine all values from the array into
a single scalar result; if an int, group by that axis: `0` is the
outermost, `1` is the first level of nested lists, etc., and
negative `axis` counts from the innermost: `-1` is the innermost,
`-2` is the next level up, etc.
keepdims (bool): If False, this reducer decreases the number of
dimensions by 1; if True, the reduced values are wrapped in a new
length-1 dimension so that the result of this operation may be
broadcasted with the original array.
mask_identity (bool): If True, reducing over empty lists results in
None (an option type); otherwise, reducing over empty lists
results in the operation's identity.
flatten_records (bool): If True, axis=None combines fields from different
records; otherwise, records raise an error.
Like #ak.prod, but treating NaN ("not a number") values as missing.
Equivalent to
ak.prod(ak.nan_to_none(array))
with all other arguments unchanged.
See also #ak.prod.
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.nanprod",
dict(
array=array,
axis=axis,
keepdims=keepdims,
mask_identity=mask_identity,
flatten_records=flatten_records,
),
):
array = ak._v2.operations.ak_nan_to_none._impl(array, False, None)
return _impl(array, axis, keepdims, mask_identity, flatten_records)
def _impl(array, axis, keepdims, mask_identity, flatten_records):
layout = ak._v2.operations.to_layout(array, allow_record=False, allow_other=False)
if axis is None:
if not layout.nplike.known_data or not layout.nplike.known_shape:
reducer_cls = ak._v2._reducers.Prod
def map(x):
return ak._v2._typetracer.UnknownScalar(
np.dtype(reducer_cls.return_dtype(x.dtype))
)
else:
def map(x):
return layout.nplike.prod(x)
def reduce(xs):
if len(xs) == 1:
return xs[0]
else:
return layout.nplike.multiply(xs[0], reduce(xs[1:]))
return reduce(
[
map(x)
for x in layout.completely_flatten(
function_name="ak.prod", flatten_records=flatten_records
)
]
)
else:
behavior = ak._v2._util.behavior_of(array)
out = layout.prod(axis=axis, mask=mask_identity, keepdims=keepdims)
if isinstance(out, (ak._v2.contents.Content, ak._v2.record.Record)):
return ak._v2._util.wrap(out, behavior)
else:
return out
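# Usage sketch (assumption: the public `ak.prod` entry point dispatches to _impl above;
# the exact import path depends on the installed awkward version):
#
#   >>> arr = ak.Array([[1, 2, 3], [], [4, 5]])
#   >>> ak.prod(arr, axis=-1).tolist()
#   [6, 1, 20]          # an empty list reduces to the multiplicative identity, 1
#   >>> ak.prod(arr, axis=-1, mask_identity=True).tolist()
#   [6, None, 20]       # with mask_identity=True the identity is masked to None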
|
the-stack_106_30336 | import os
from abc import ABCMeta, abstractmethod
class SimulationFailedError(BaseException):
"""
This exception will be raised when simulator fails
to calculate the performance of the circuit.
"""
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = None
def __str__(self):
if self.message:
return f"{self.message}"
class BaseSimulator(metaclass=ABCMeta):
"""
Abstract base class for any type of simulator.
"""
def __init__(self, path: str):
self.path = path
@abstractmethod
def simulate(self):
pass
class HSpiceSimulator(BaseSimulator):
def __init__(self, path: str, circuit_name: str):
super().__init__(path)
self.circuit_name = circuit_name
def __repr__(self):
return f"HSpiceSimulator({self.path})"
def simulate(self):
path = self.path.replace('/', '\\')
execution_command = r'start/min/wait /D ' + path + \
r' C:\synopsys\Hspice_A-2008.03\BIN\hspicerf.exe ' \
+ self.circuit_name + '.sp -o ' + self.circuit_name
os.system(execution_command)
@staticmethod
def file_reader(file_name: str) -> tuple:
with open(file_name, 'r') as f:
lines = f.readlines()
headers_list = lines[2].split()
lines_list = lines[3].split()
for header, value in zip(headers_list, lines_list):
yield header, value
def write_param(self, topology: list, parameters: list):
with open(self.path + 'param.cir', 'w') as f:
f.write('.PARAM\n')
for header, parameter in zip(topology, parameters):
f.write('+ ' + header + ' = ' + str(parameter) + '\n')
def read_ma0(self) -> list:
""" Read gain, bw, himg, hreal, tmp from .ma0 file"""
file_name = self.path + self.circuit_name + '.ma0'
outputs = []
for header, value in self.file_reader(file_name):
try:
value = float(value)
except ValueError:
raise SimulationFailedError(
f"HSpice could not calculate the response of the {header}. Which is {header}:{value}"
f"Check error logs for more information.") from None
else:
outputs.append((header, value))
return outputs
def read_mt0(self) -> list:
""" Read power, area, temper"""
file_name = self.path + self.circuit_name + '.mt0'
outputs = []
for header, value in self.file_reader(file_name):
try:
value = float(value)
except ValueError:
raise SimulationFailedError(
f"HSpice could not calculate the response of the {header}. Which is {header}:{value}"
f"Check error logs for more information.") from None
else:
outputs.append((header, value))
return outputs
def read_dp0(self, transistor_count: int) -> dict:
""" Read values of transistor from .dp0 file."""
Id = [0.00] * transistor_count
Ibs = [0.00] * transistor_count
Ibd = [0.00] * transistor_count
Vgs = [0.00] * transistor_count
Vds = [0.00] * transistor_count
Vbs = [0.00] * transistor_count
Vth = [0.00] * transistor_count
Vdsat = [0.00] * transistor_count
beta = [0.00] * transistor_count
gm = [0.00] * transistor_count
gds = [0.00] * transistor_count
gmb = [0.00] * transistor_count
with open(self.path + self.circuit_name + '.dp0', 'r') as f:
lines = f.readlines()
row_list = [line.split('|') for line in lines
if '|' in line]
row_list = [[elem.strip() for elem in row
if not elem == '']
for row in row_list]
transistor_names = ['M' + str(x + 1) for x in range(transistor_count)]
for rowN, row in enumerate(row_list):
for colN, elem in enumerate(row):
if elem in transistor_names:
transN = int(elem[-1])
Id[transN - 1] = float(row_list[rowN + 4][colN])
Ibs[transN - 1] = float(row_list[rowN + 5][colN])
Ibd[transN - 1] = float(row_list[rowN + 6][colN])
Vgs[transN - 1] = float(row_list[rowN + 7][colN])
Vds[transN - 1] = float(row_list[rowN + 8][colN])
Vbs[transN - 1] = float(row_list[rowN + 9][colN])
Vth[transN - 1] = float(row_list[rowN + 10][colN])
Vdsat[transN - 1] = float(row_list[rowN + 11][colN])
beta[transN - 1] = float(row_list[rowN + 12][colN])
gm[transN - 1] = float(row_list[rowN + 14][colN])
gds[transN - 1] = float(row_list[rowN + 15][colN])
gmb[transN - 1] = float(row_list[rowN + 16][colN])
return {'Id': Id, 'Ibs': Ibs, 'Ibd': Ibd, 'Vgs': Vgs,
'Vds': Vds, 'Vbs': Vbs, 'Vth': Vth, 'Vdsat': Vdsat,
'beta': beta, 'gm': gm, 'gds': gds, 'gmb': gmb}
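# Usage sketch (paths, circuit name and parameter names below are assumptions; the
# methods are the ones defined on HSpiceSimulator above):
#
#   sim = HSpiceSimulator('C:/sims/opamp/', 'opamp')
#   sim.write_param(['W1', 'L1'], [2e-6, 0.18e-6])   # writes param.cir next to the netlist
#   sim.simulate()                                   # runs HSpice on opamp.sp
#   ac_results = dict(sim.read_ma0())                # e.g. gain, bw, himg, hreal
#   op_point = sim.read_dp0(transistor_count=8)      # per-transistor Id, Vgs, gm, ...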
|
the-stack_106_30337 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
import warnings
from collections import namedtuple, defaultdict
from docplex.mp.utils import is_string
from docplex.mp.constants import ComparisonType, VarBoundType
from docplex.mp.utils import str_maxed
from docplex.mp.publish import PublishResultAsDf
TConflictConstraint = namedtuple("_TConflictConstraint", ["name", "element", "status"])
def trim_field(element):
return str_maxed(element, maxlen=50)
def to_output_table(conflicts, use_df=True):
# Returns the output tables, as df if pandas is available or as a list
# of named tuple ['Type', 'Status', 'Name', 'Expression']
columns = ['Type', 'Status', 'Name', 'Expression']
TOutputTables = namedtuple('TOutputTables', columns)
def convert_to_pandas_df(c):
return {'Type': 'Constraint',
'Status': c.status.name if c.status is not None else '',
'Name': c.name or '',
'Expression': trim_field(c.element)}
def convert_to_namedtuples(c):
return TOutputTables('Constraint',
c.status,
c.name or '',
trim_field(c.element))
pandas = None
if use_df:
try:
import pandas
except ImportError: # pragma: no cover
print("pandas module not found...")
pandas = None
data_converter = convert_to_pandas_df if pandas and use_df else convert_to_namedtuples
output_data = list(map(data_converter, conflicts))
if use_df:
return pandas.DataFrame(columns=columns, data=output_data)
else:
return output_data
class ConflictRefinerResult(object):
""" This class contains all conflicts as returned by the conflict refiner.
A conflict refiner result contains a list of named tuples of type ``TConflictConstraint``,
the fields of which are:
- an enumerated value of type ``docplex.mp.constants.ConflictStatus`` that indicates the
conflict status type (Excluded, Possible_member, Member...).
- the name of the constraint or None if the constraint corresponds to a variable lower or upper bound.
- a modeling object involved in the conflict:
can be either a constraint or a wrapper representing a variable upper or lower bound.
*New in version 2.11*
"""
def __init__(self, conflicts, refined_by=None):
self._conflicts = conflicts
assert refined_by is None or is_string(refined_by)
self._refined_by = refined_by
@property
def refined_by(self):
'''
Returns a string indicating how the conflicts were produced.
- If the conflicts are created by a program, this field returns None.
- If the conflicts originated from a local CPLEX run, this method returns 'cplex_local'.
- If the conflicts originated from a DOcplexcloud run, this method returns 'cplex_cloud'.
Returns:
A string, or None.
'''
return self._refined_by
def __iter__(self):
return self.iter_conflicts()
def __len__(self):
""" Redefintion of maguic method __len__.
Allows calling len() on an instance of ConflictRefinerResult
to get the number of conflicts
:return: the number of conflicts.
"""
return len(self._conflicts)
def iter_conflicts(self):
""" Returns an iterator on conflicts (named tuples)
:return: an iterator
"""
return iter(self._conflicts)
@property
def number_of_conflicts(self):
""" This property returns the number of conflicts. """
return len(self._conflicts)
def display(self):
""" Displays all conflicts.
"""
print('conflict(s): {0}'.format(self.number_of_conflicts))
for conflict in self.iter_conflicts():
st = conflict.status
elt = conflict.element
if hasattr(conflict.element, 'as_constraint'):
ct = conflict.element.as_constraint()
label = elt.short_typename
else:
ct = elt
label = ct.__class__.__name__
print(" - status: {1}, {0}: {2!s}".format(label, st.name, ct.to_readable_string()))
def display_stats(self):
""" Displays statistics on conflicts.
Display show many conflict elements per type.
"""
def elt_typename(elt):
try:
return elt.short_typename.lower()
except AttributeError: # pragma: no cover
return elt.__class__.__name__.lower()
ncf = self.number_of_conflicts
print('conflict{1}: {0}'.format(ncf, "s" if ncf > 1 else ""))
cf_stats = defaultdict(lambda: 0)
for conflict in self.iter_conflicts():
elt_type = elt_typename(conflict.element)
cf_stats[elt_type] += 1
for eltt, count in cf_stats.items():
if count:
print(" - {0}{2}: {1}".format(eltt, count, ("s" if count > 1 else "")))
def as_output_table(self, use_df=True):
return to_output_table(self, use_df)
def print_information(self):
""" Similar as `display_stats`
"""
self.display_stats()
class VarBoundWrapper(object):
# INTERNAL
def __init__(self, dvar):
self._var = dvar
@property
def var(self):
return self._var
@property
def index(self):
return self._var.index
@property
def short_typename(self): # pragma: no cover
return "Variable Bound"
def as_constraint(self): # pragma: no cover
raise NotImplementedError
def as_constraint_from_symbol(self, op_symbol):
self_var = self.var
var_lb = self.var.lb
op = ComparisonType.cplex_ctsense_to_python_op(op_symbol)
ct = op(self_var, var_lb)
return ct
@classmethod
def make_wrapper(cls, var, bound_type):
if bound_type == VarBoundType.LB:
return VarLbConstraintWrapper(var)
elif bound_type == VarBoundType.UB:
return VarUbConstraintWrapper(var)
else:
return None
class VarLbConstraintWrapper(VarBoundWrapper):
"""
This class is a wrapper for a model variable and its associated lower bound.
Instances of this class are created by the ``refine_conflict`` method when the conflict involves
a variable lower bound. Each of these instances is then referenced by a ``TConflictConstraint`` namedtuple
in the conflict list returned by ``refine_conflict``.
To check whether the lower bound of a variable causes a conflict, wrap the variable and
include the resulting constraint in a ConstraintsGroup.
"""
@property
def short_typename(self):
return "Lower Bound"
def as_constraint(self):
return self.as_constraint_from_symbol('G')
class VarUbConstraintWrapper(VarBoundWrapper):
"""
This class is a wrapper for a model variable and its associated upper bound.
Instances of this class are created by the ``refine_conflict`` method when the conflict involves
a variable upper bound. Each of these instances is then referenced by a ``TConflictConstraint`` namedtuple
in the conflict list returned by ``refine_conflict``.
To check whether the upper bound of a variable causes a conflict, wrap the variable and
include the resulting constraint in a ConstraintsGroup.
"""
@property
def short_typename(self):
return "Upper Bound"
def as_constraint(self):
return self.as_constraint_from_symbol('L')
class ConstraintsGroup(object):
"""
This class is a container for the definition of a group of constraints.
A preference for conflict refinement is associated to the group.
Groups may be assigned preference. A group with a higher preference is more likely to be included in the conflict.
A negative value specifies that the corresponding group should not be considered in the computation
of a conflict. In other words, such groups are not considered part of the model. Groups with a preference of 0 (zero)
are always considered to be part of the conflict.
Args:
preference: A floating-point number that specifies the preference for the group. The higher the number, the
higher the preference.
"""
__slots__ = ('_preference', '_cts')
def __init__(self, preference=1.0, cts=None):
self._preference = preference
self._cts = []
if cts is not None:
self.add_constraints(cts)
@classmethod
def from_var(cls, dvar, bound_type, pref):
""" A class method to build a group fromone variable.
:param dvar: The variable whose bound is part of the conflict.
:param bound_type: An enumerated value of type `VarBoundType`
:param pref: a numerical preference.
:return: an instance of ConstraintsGroup.
See Also:
:class:`docplex.mp.constants.VarBoundType`
"""
cgg = cls(preference=pref, cts=VarBoundWrapper.make_wrapper(dvar, bound_type))
return cgg
@property
def preference(self):
return self._preference
def add_one(self, x):
if x is not None:
self._cts.append(x)
def add_constraint(self, ct):
self._cts.append(ct)
def add_constraints(self, cts):
try:
for ct in cts:
self.add_one(ct)
except TypeError: # not iterable.
self.add_one(cts)
def iter_constraints(self):
return iter(self._cts)
class ConflictRefiner(PublishResultAsDf, object):
''' This class is an abstract algorithm; it operates on interfaces.
A conflict is a set of mutually contradictory constraints and bounds within a model.
Given an infeasible model, the conflict refiner can identify conflicting constraints and bounds
within it. CPLEX refines an infeasible model by examining elements that can be removed from the
conflict to arrive at a minimal conflict.
'''
# static variables for output
output_table_property_name = 'conflicts_output'
default_output_table_name = 'conflicts.csv'
output_table_using_df = True
def __init__(self, output_processing=None):
self.output_table_customizer = output_processing
@classmethod
def _make_atomic_ct_groups(cls, mdl_iter, pref):
# returns a list of singleton groups from a model iterator and a numerical preference.
lcgrps = [ConstraintsGroup(pref, ct) for ct in mdl_iter]
return lcgrps
@classmethod
def var_bounds(cls, mdl, pref=4.0, include_infinity_bounds=True):
""" Returns a list of singleton groups with variable bounds.
This method a list of ConstraintGroup objects, each of which contains a variabel bound.
It replicate sthe behavior of the CPLEX interactive optimizer, that is, it returns
- lower bounds for non-binary variables if different from 0
- upper bound for non-binary-variables if non-default
For binary variables, bounds are not considered, unless the variable is bound; more precisely:
- lower bound is included if >= 0.5
- upper bound is included if <= 0.5
:param mdl: The model being analyzed for conflicts,
:param pref: the preference for variable bounds, the defaut is 4.0
:param include_infinity_bounds: a flag indicating whether infi
:return: a list of `ConstraintsGroup` objects.
"""
grps = []
mdl_inf = mdl.infinity
for dv in mdl.iter_variables():
lb, ub = dv.lb, dv.ub
if not dv.is_binary():
if lb != 0:
if include_infinity_bounds or lb > - mdl_inf:
grps.append(ConstraintsGroup.from_var(dv, VarBoundType.LB, pref))
if include_infinity_bounds or ub < mdl_inf:
grps.append(ConstraintsGroup.from_var(dv, VarBoundType.UB, pref))
else:
if lb >= 0.5:
grps.append(ConstraintsGroup.from_var(dv, VarBoundType.LB, pref))
if ub <= 0.5:
grps.append(ConstraintsGroup.from_var(dv, VarBoundType.UB, pref))
return grps
@classmethod
def linear_constraints(cls, mdl, pref=2.0):
return cls._make_atomic_ct_groups(mdl.iter_linear_constraints(), pref)
@classmethod
def logical_constraints(cls, mdl, pref=1.0):
return cls._make_atomic_ct_groups(mdl.iter_logical_constraints(), pref)
@classmethod
def quadratic_constraints(cls, mdl, pref=1.0):
return cls._make_atomic_ct_groups(mdl.iter_quadratic_constraints(), pref)
def refine_conflict(self, mdl, preferences=None, groups=None, display=False, **kwargs):
""" Starts the conflict refiner on the model.
Args:
mdl: The model to be relaxed.
preferences: A dictionary defining constraint preferences.
groups: A list of ConstraintsGroups.
            display: a boolean flag (default is False); if True, displays the result at the end.
kwargs: Accepts named arguments similar to solve.
Returns:
            An object of type `ConflictRefinerResult` which holds all information about
the minimal conflict.
See Also:
:class:`ConflictRefinerResult`
"""
if mdl.has_multi_objective():
mdl.fatal("Conflict refiner is not supported for multi-objective")
# take into account local argument overrides
context = mdl.prepare_actual_context(**kwargs)
# log stuff
saved_context_log_output = mdl.context.solver.log_output
saved_log_output_stream = mdl.log_output
try:
mdl.set_log_output(context.solver.log_output)
if mdl.environment.has_cplex:
results = self._refine_conflict_local(mdl, context, preferences, groups)
else:
return mdl.fatal("CPLEX runtime not found: cannot run conflict refiner")
# write conflicts table.write_output_table() handles everything related to
# whether the table should be published etc...
if self.is_publishing_output_table(mdl.context):
self.write_output_table(results.as_output_table(self.output_table_using_df), mdl.context)
if display:
results.display_stats()
return results
finally:
if saved_log_output_stream != mdl.log_output:
mdl.set_log_output_as_stream(saved_log_output_stream)
if saved_context_log_output != mdl.context.solver.log_output:
mdl.context.solver.log_output = saved_context_log_output
# noinspection PyMethodMayBeStatic
def _refine_conflict_cloud(self, mdl, context, preferences=None, groups=None):
# INTERNAL
docloud_context = context.solver.docloud
parameters = context.cplex_parameters
# see if we can reuse the local docloud engine if any?
docloud_engine = mdl._engine_factory.new_docloud_engine(model=mdl,
docloud_context=docloud_context,
log_output=context.solver.log_output_as_stream)
mdl.notify_start_solve()
mdl._fire_start_solve_listeners()
conflict = docloud_engine.refine_conflict(mdl, preferences=preferences, groups=groups, parameters=parameters)
mdl._fire_end_solve_listeners(conflict is not None, None)
#
return conflict
# noinspection PyMethodMayBeStatic
def _refine_conflict_local(self, mdl, context, preferences=None, groups=None):
parameters = context.cplex_parameters
self_engine = mdl.get_engine()
return self_engine.refine_conflict(mdl, preferences, groups, parameters)
@staticmethod
def display_conflicts(conflicts):
"""
This method displays a formatted representation of the conflicts that are provided.
Args:
conflicts: An instance of ``ConflictRefinerResult``
"""
warnings.warn("deprecated: use ConflictRefinerresult.display", DeprecationWarning)
conflicts.display()
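# Usage sketch (the model built here is an assumption; refine_conflict and the result
# methods are the ones defined above):
#
#   from docplex.mp.model import Model
#   mdl = Model(name='infeasible_example')
#   x = mdl.continuous_var(name='x', ub=5)
#   mdl.add_constraint(x >= 10, 'x_too_big')         # contradicts the upper bound of x
#   result = ConflictRefiner().refine_conflict(mdl)  # requires a local CPLEX runtime
#   result.display_stats()                           # e.g. "conflict(s): 2"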
|
the-stack_106_30339 | """
You are given a string, S, and a list of words, L, that are all of the same length.
Find all starting indices of substring(s) in S that is a concatenation of each word in L exactly once and without any intervening characters.
For example, given:
S: "barfoothefoobarman"
L: ["foo", "bar"]
You should return the indices: [0,9].
(order does not matter).
"""
class Solution:
# @param S, a string
# @param L, a list of string
# @return a list of integer
def findSubstring(self, S, L):
len_word = len(L[0])
len_L = len(L)
len_S = len(S)
ret = []
for i in range(len_S - len_word * len_L + 1):
list_S = [ S[j:j+len_word] for j in range(i, i + len_L*len_word, len_word)]
found = True
for word in L:
if word in list_S:
list_S.remove(word)
else:
found = False
break
if found:
ret.append(i)
return ret
# Note
# 1. The idea is to split S[i : i + len_L*len_word] into words of length len_word and compare that list with L.
#    It can be improved by (i) replacing the list with a dict/Counter for faster membership checks, (ii) using KMP.
# 2. This is good enough. Can use KMP but it's too complicated.
# See http://c4fun.cn/blog/2014/03/20/leetcode-solution-02/#Substring_with_Concatenation_of_All_Words
# for KMP solution
# 3. Note that in the list_S construction, computing the bounds once inside range() is faster than recalculating them in the list comprehension.
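# Quick sanity check using the example from the problem statement above.
if __name__ == "__main__":
    print(Solution().findSubstring("barfoothefoobarman", ["foo", "bar"]))  # -> [0, 9]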
|
the-stack_106_30340 | from itertools import groupby
import numpy as np
from yt.geometry.selection_routines import AlwaysSelector
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
# -----------------------------------------------------------------------------
# GAMER shares a similar HDF5 format, and thus io.py as well, with FLASH
# -----------------------------------------------------------------------------
# group grids with consecutive indices together to improve the I/O performance
# --> grids are assumed to be sorted into ascending numerical order already
def grid_sequences(grids):
for _k, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id):
seq = list(v[1] for v in g)
yield seq
def particle_sequences(grids):
for _k, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id):
seq = list(v[1] for v in g)
yield seq[0], seq[-1]
class IOHandlerGAMER(BaseIOHandler):
_particle_reader = False
_dataset_type = "gamer"
def __init__(self, ds):
super(IOHandlerGAMER, self).__init__(ds)
self._handle = ds._handle
self._group_grid = ds._group_grid
self._group_particle = ds._group_particle
self._field_dtype = "float64" # fixed even when FLOAT8 is off
self._particle_handle = ds._particle_handle
self.patch_size = ds.parameters["PatchSize"] * ds.refine_by
self.pgroup = ds.refine_by ** 3 # number of patches in a patch group
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks) # generator --> list
p_idx = self.ds.index._particle_indices
# shortcuts
par_posx = self._group_particle["ParPosX"]
par_posy = self._group_particle["ParPosY"]
par_posz = self._group_particle["ParPosZ"]
# currently GAMER does not support multiple particle types
assert len(ptf) == 1
ptype = list(ptf.keys())[0]
for chunk in chunks:
for g1, g2 in particle_sequences(chunk.objs):
start = p_idx[g1.id]
end = p_idx[g2.id + 1]
x = np.asarray(par_posx[start:end], dtype=self._field_dtype)
y = np.asarray(par_posy[start:end], dtype=self._field_dtype)
z = np.asarray(par_posz[start:end], dtype=self._field_dtype)
yield ptype, (x, y, z)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks) # generator --> list
p_idx = self.ds.index._particle_indices
# shortcuts
par_posx = self._group_particle["ParPosX"]
par_posy = self._group_particle["ParPosY"]
par_posz = self._group_particle["ParPosZ"]
# currently GAMER does not support multiple particle types
assert len(ptf) == 1
ptype = list(ptf.keys())[0]
pfields = ptf[ptype]
for chunk in chunks:
for g1, g2 in particle_sequences(chunk.objs):
start = p_idx[g1.id]
end = p_idx[g2.id + 1]
x = np.asarray(par_posx[start:end], dtype=self._field_dtype)
y = np.asarray(par_posy[start:end], dtype=self._field_dtype)
z = np.asarray(par_posz[start:end], dtype=self._field_dtype)
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in pfields:
data = self._group_particle[field][start:end]
yield (ptype, field), data[mask]
def _read_fluid_selection(self, chunks, selector, fields, size):
chunks = list(chunks) # generator --> list
if any((ftype != "gamer" for ftype, fname in fields)):
raise NotImplementedError
rv = {}
for field in fields:
rv[field] = np.empty(size, dtype=self._field_dtype)
ng = sum(len(c.objs) for c in chunks) # c.objs is a list of grids
mylog.debug(
"Reading %s cells of %s fields in %s grids",
size,
[f2 for f1, f2 in fields],
ng,
)
# shortcuts
ps2 = self.patch_size
ps1 = ps2 // 2
for field in fields:
ds = self._group_grid[field[1]]
offset = 0
for chunk in chunks:
for gs in grid_sequences(chunk.objs):
start = (gs[0].id) * self.pgroup
end = (gs[-1].id + 1) * self.pgroup
buf = ds[start:end, :, :, :]
ngrid = len(gs)
data = np.empty((ngrid, ps2, ps2, ps2), dtype=self._field_dtype)
for g in range(ngrid):
pid0 = g * self.pgroup
data[g, 0:ps1, 0:ps1, 0:ps1] = buf[pid0 + 0, :, :, :]
data[g, 0:ps1, 0:ps1, ps1:ps2] = buf[pid0 + 1, :, :, :]
data[g, 0:ps1, ps1:ps2, 0:ps1] = buf[pid0 + 2, :, :, :]
data[g, ps1:ps2, 0:ps1, 0:ps1] = buf[pid0 + 3, :, :, :]
data[g, 0:ps1, ps1:ps2, ps1:ps2] = buf[pid0 + 4, :, :, :]
data[g, ps1:ps2, ps1:ps2, 0:ps1] = buf[pid0 + 5, :, :, :]
data[g, ps1:ps2, 0:ps1, ps1:ps2] = buf[pid0 + 6, :, :, :]
data[g, ps1:ps2, ps1:ps2, ps1:ps2] = buf[pid0 + 7, :, :, :]
data = data.transpose()
for i, g in enumerate(gs):
offset += g.select(selector, data[..., i], rv[field], offset)
return rv
def _read_chunk_data(self, chunk, fields):
rv = {}
if len(chunk.objs) == 0:
return rv
for g in chunk.objs:
rv[g.id] = {}
# Split into particles and non-particles
fluid_fields, particle_fields = [], []
for ftype, fname in fields:
if ftype in self.ds.particle_types:
particle_fields.append((ftype, fname))
else:
fluid_fields.append((ftype, fname))
# particles
if len(particle_fields) > 0:
selector = AlwaysSelector(self.ds)
rv.update(self._read_particle_selection([chunk], selector, particle_fields))
# fluid
if len(fluid_fields) == 0:
return rv
ps2 = self.patch_size
ps1 = ps2 // 2
for field in fluid_fields:
ds = self._group_grid[field[1]]
for gs in grid_sequences(chunk.objs):
start = (gs[0].id) * self.pgroup
end = (gs[-1].id + 1) * self.pgroup
buf = ds[start:end, :, :, :]
ngrid = len(gs)
data = np.empty((ngrid, ps2, ps2, ps2), dtype=self._field_dtype)
for g in range(ngrid):
pid0 = g * self.pgroup
data[g, 0:ps1, 0:ps1, 0:ps1] = buf[pid0 + 0, :, :, :]
data[g, 0:ps1, 0:ps1, ps1:ps2] = buf[pid0 + 1, :, :, :]
data[g, 0:ps1, ps1:ps2, 0:ps1] = buf[pid0 + 2, :, :, :]
data[g, ps1:ps2, 0:ps1, 0:ps1] = buf[pid0 + 3, :, :, :]
data[g, 0:ps1, ps1:ps2, ps1:ps2] = buf[pid0 + 4, :, :, :]
data[g, ps1:ps2, ps1:ps2, 0:ps1] = buf[pid0 + 5, :, :, :]
data[g, ps1:ps2, 0:ps1, ps1:ps2] = buf[pid0 + 6, :, :, :]
data[g, ps1:ps2, ps1:ps2, ps1:ps2] = buf[pid0 + 7, :, :, :]
data = data.transpose()
for i, g in enumerate(gs):
rv[g.id][field] = data[..., i]
return rv
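# Minimal illustration (not part of the original reader): grid_sequences() groups grids
# whose ids are consecutive, which is what allows one HDF5 slice per run of grids.
# The DummyGrid class below is only a stand-in for the demo.
if __name__ == "__main__":
    class DummyGrid:
        def __init__(self, id):
            self.id = id
    demo_grids = [DummyGrid(i) for i in (0, 1, 2, 5, 6, 9)]
    print([[g.id for g in seq] for seq in grid_sequences(demo_grids)])
    # -> [[0, 1, 2], [5, 6], [9]]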
|
the-stack_106_30342 | """
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc, namedtuple
import copy
from functools import partial
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import lib, reduction as libreduction
from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion
from pandas.util._decorators import Appender, Substitution, doc
from pandas.core.dtypes.cast import (
find_common_type,
maybe_cast_result_dtype,
maybe_downcast_numeric,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_bool,
is_categorical_dtype,
is_integer_dtype,
is_interval_dtype,
is_numeric_dtype,
is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, nanops
from pandas.core.aggregation import (
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.apply import GroupByApply
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
get_groupby,
group_selection_context,
)
from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
if TYPE_CHECKING:
from pandas.core.internals import Block
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
def generate_property(name: str, klass: Type[FrameOrSeries]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_allowlisted_properties(klass: Type[FrameOrSeries], allowlist: FrozenSet[str]):
"""
Create GroupBy member defs for DataFrame/Series names in a allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
@property
def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
ret.columns = columns
else:
cyfunc = self._get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except (ValueError, KeyError):
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
if not self.as_index: # pragma: no cover
print("Warning, ignoring as_index=True")
if isinstance(ret, dict):
from pandas import concat
ret = concat(ret.values(), axis=1, keys=[key.label for key in ret.keys()])
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series):
raise SpecificationError("nested renamer is not supported")
columns = list(arg.keys())
arg = arg.items()
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: Dict[base.OutputKey, FrameOrSeriesUnion] = {}
for idx, (name, func) in enumerate(arg):
obj = self
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[base.OutputKey(label=name, position=idx)] = obj.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
# let higher level handle
return results
output = self._wrap_aggregated_output(results, index=None)
return self.obj._constructor_expanddim(output, columns=columns)
# TODO: index should not be Optional - see GH 35490
def _wrap_series_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> FrameOrSeriesUnion:
"""
Wraps the output of a SeriesGroupBy operation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
index : pd.Index or None
Index to apply to the output.
Returns
-------
Series or DataFrame
Notes
-----
In the vast majority of cases output and columns will only contain one
element. The exception is operations that expand dimensions, like ohlc.
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index(key.label for key in output)
result: FrameOrSeriesUnion
if len(output) > 1:
result = self.obj._constructor_expanddim(indexed_output, index=index)
result.columns = columns
elif not columns.empty:
result = self.obj._constructor(
indexed_output[0], index=index, name=columns[0]
)
else:
result = self.obj._constructor_expanddim()
return result
# TODO: Remove index argument, use self.grouper.result_index, see GH 35490
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> FrameOrSeriesUnion:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
Series or DataFrame
Notes
-----
In the vast majority of cases output will only contain one element.
The exception is operations that expand dimensions, like ohlc.
"""
result = self._wrap_series_output(output=output, index=index)
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]]
) -> Series:
"""
        Wraps the output of a SeriesGroupBy transformation into the expected result.
Parameters
----------
output : dict[base.OutputKey, Union[Series, np.ndarray]]
Dict with a sole key of 0 and a value of the result values.
Returns
-------
Series
Notes
-----
output should always contain one element. It is specified as a dict
for consistency with DataFrame methods and _wrap_aggregated_output.
"""
assert len(output) == 1
result = self._wrap_series_output(output=output, index=self.obj.index)
# No transformations increase the ndim of the result
assert isinstance(result, Series)
return result
def _wrap_applied_output(
self, keys: Index, values: Optional[List[Any]], not_indexed_same: bool = False
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[], name=self._selection_name, index=keys, dtype=np.float64
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result: FrameOrSeriesUnion = self._reindex_output(
self.obj._constructor_expanddim(values, index=index)
)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self._selection_name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
initialized = False
for name, group in self:
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
group.name = name # NB: libreduction does not pin name
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, 0)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result = self._transform_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(
result.ravel(), index=data.index, name=data.name
)
func = self._get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transform or canned "agg+broadcast"
return getattr(self, func)(*args, **kwargs)
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
result = getattr(self, func)(*args, **kwargs)
return self._transform_fast(result)
def _transform_general(self, func, *args, **kwargs):
"""
Transform with a non-str `func`.
"""
klass = type(self._selected_obj)
results = []
for name, group in self:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
if isinstance(res, (DataFrame, Series)):
res = res._values
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* user-defined funcs
# the cython take a different path (and casting)
if is_numeric_dtype(result.dtype):
common_dtype = find_common_type([self._selected_obj.dtype, result.dtype])
if common_dtype is result.dtype:
result = maybe_downcast_numeric(result, self._selected_obj.dtype)
result.name = self._selected_obj.name
return result
def _transform_fast(self, result) -> Series:
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
out = algorithms.take_1d(result._values, ids)
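        # ids holds each original row's group position, so take_1d broadcasts the per-group
        # aggregate back onto the original index; e.g. groups ['a', 'b', 'a'] give
        # ids [0, 1, 0] and out [v_a, v_b, v_a].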
return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
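        # Worked example (dropna=True): sorted ids [0, 0, 1, 1] with codes [0, 0, 0, 1] give
        # idx [0, 2] and inc [1, 0, 1, 1] after forcing group starts to 1, so reduceat
        # yields out [1, 2], i.e. 1 and 2 unique values in the two groups.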
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self._selection_name)
return self._reindex_output(result, fill_value=0)
@doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(
self,
normalize=False,
sort=True,
ascending=False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes, allow_fill=True, fill_value=lev._na_value)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(lchanges):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
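        # np.add.reduceat(inc, idx) is the number of distinct observed values per group, so
        # rep(level_codes) repeats each group's grouper codes once per distinct value when
        # the MultiIndex codes are assembled below.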
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is None:
mi = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
def count(self) -> Series:
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._values
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
result = self.obj._constructor(
out,
index=self.grouper.result_index,
name=self._selection_name,
dtype="int64",
)
return self._reindex_output(result, fill_value=0)
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(
lambda x: x.pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
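        # Shifting within fill_grp (grouped by the same codes) keeps the comparison inside
        # each group, so the first `periods` entries of every group become NaN instead of
        # picking up values from the previous group.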
return (filled / shifted) - 1
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more."""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result, how = op.agg()
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not allow self.axis == 1
result = self._aggregate_frame(func)
else:
# try to treat as if we are passing a list
try:
result, _ = GroupByApply(
self, [func], args=(), kwargs={"_axis": self.axis}
).agg()
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH 32040
result.columns = result.columns.rename(
[self._selected_obj.columns.name] * result.columns.nlevels
).droplevel(-1)
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
except AttributeError:
# catch exception from line 969
# (Series does not have attribute "columns"), see GH 35246
result = self._aggregate_frame(func)
if relabeling:
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if isinstance(obj, Series) and obj.name not in self.exclusions:
# Occurs when doing DataFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> DataFrame:
agg_mgr = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count
)
return self._wrap_agged_blocks(agg_mgr.blocks, items=agg_mgr.items)
def _cython_agg_blocks(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> BlockManager:
data: BlockManager = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike:
# see if we can cast the values to the desired dtype
# this may not be the original dtype
assert not isinstance(result, DataFrame)
dtype = maybe_cast_result_dtype(values.dtype, how)
result = maybe_downcast_numeric(result, dtype)
if isinstance(values, Categorical) and isinstance(result, np.ndarray):
# If the Categorical op didn't raise, it is dtype-preserving
result = type(values)._from_sequence(result.ravel(), dtype=values.dtype)
# Note this will have result.dtype == dtype from above
elif isinstance(result, np.ndarray) and result.ndim == 1:
# We went through a SeriesGroupByPath and need to reshape
# GH#32223 includes case with IntegerArray values
result = result.reshape(1, -1)
# test_groupby_duplicate_columns gets here with
# result.dtype == int64, values.dtype=object, how="min"
return result
def py_fallback(bvalues: ArrayLike) -> ArrayLike:
# if self.grouper.aggregate fails, we fall back to a pure-python
# solution
# We get here with a) EADtypes and b) object dtype
obj: FrameOrSeriesUnion
# call our grouper again with only this block
if isinstance(bvalues, ExtensionArray):
# TODO(EA2D): special case not needed with 2D EAs
obj = Series(bvalues)
else:
obj = DataFrame(bvalues.T)
if obj.shape[1] == 1:
# Avoid call to self.values that can occur in DataFrame
# reductions; see GH#28949
obj = obj.iloc[:, 0]
# Create SeriesGroupBy with observed=True so that it does
# not try to add missing categories if grouping over multiple
# Categoricals. This will done by later self._reindex_output()
# Doing it here creates an error. See GH#34951
sgb = get_groupby(obj, self.grouper, observed=True)
result = sgb.aggregate(lambda x: alt(x, axis=self.axis))
assert isinstance(result, (Series, DataFrame)) # for mypy
# In the case of object dtype block, it may have been split
# in the operation. We un-split here.
result = result._consolidate()
assert isinstance(result, (Series, DataFrame)) # for mypy
mgr = result._mgr
assert isinstance(mgr, BlockManager)
# unwrap DataFrame to get array
if len(mgr.blocks) != 1:
# We've split an object block! Everything we've assumed
# about a single block input returning a single block output
# is a lie. See eg GH-39329
return mgr.as_array()
else:
result = mgr.blocks[0].values
return result
def blk_func(bvalues: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate", bvalues, how, axis=1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
assert how == "ohlc"
raise
result = py_fallback(bvalues)
return cast_agg_result(result, bvalues, how)
# TypeError -> we may have an exception in trying to aggregate
# continue and exclude the block
# NotImplementedError -> "ohlc" with wrong dtype
new_mgr = data.apply(blk_func, ignore_failures=True)
if not len(new_mgr):
raise DataError("No numeric types to aggregate")
return new_mgr
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
axis = self.axis
obj = self._obj_with_exclusions
result: Dict[Hashable, Union[NDFrame, np.ndarray]] = {}
if axis != obj._info_axis_number:
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
for name in self.indices:
data = self.get_group(name, obj=obj)
fres = func(data, *args, **kwargs)
result[name] = fres
return self._wrap_frame_output(result, obj)
def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
# only for axis==0
obj = self._obj_with_exclusions
result: Dict[Union[int, str], NDFrame] = {}
cannot_agg = []
for item in obj:
data = obj[item]
colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
try:
result[item] = colg.aggregate(func, *args, **kwargs)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in _aggregate_named, handle at higher level
# see test_apply_with_mutated_index
raise
# otherwise we get here from an AttributeError in _make_wrapper
cannot_agg.append(item)
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
return self.obj._constructor(result, columns=result_columns)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
return self.obj._constructor(index=keys)
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection_name
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = DataFrame(values, index=key_index, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_series(
self,
keys,
values: List[Series],
not_indexed_same: bool,
first_not_none,
key_index,
) -> FrameOrSeriesUnion:
# this is to silence a DeprecationWarning
# TODO: Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
applied_index = self._selected_obj._get_axis(self.axis)
singular_series = len(values) == 1 and applied_index.nlevels == 1
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(keys, values, not_indexed_same=True)
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
stacked_values = np.vstack([np.asarray(v) for v in values])
if self.axis == 0:
index = key_index
columns = first_not_none.index.copy()
if columns.name is None:
# GH6124 - propagate name of Series when it's consistent
names = {v.name for v in values}
if len(names) == 1:
columns.name = list(names)[0]
else:
index = first_not_none.index
columns = key_index
stacked_values = stacked_values.T
result = self.obj._constructor(stacked_values, index=index, columns=columns)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any():
result = result._convert(datetime=True)
else:
result = result._convert(datetime=True)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
return self._reindex_output(result)
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
for name, group in gen:
object.__setattr__(group, "name", name)
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = self.obj._constructor(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
columns=group.columns,
index=group.index,
)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result = self._transform_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=data.index, columns=data.columns)
# optimized transforms
func = self._get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transformation or canned "reduction+broadcast"
return getattr(self, func)(*args, **kwargs)
# GH 30918
# Use _transform_fast only when we know func is an aggregation
if func in base.reduction_kernels:
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
result = getattr(self, func)(*args, **kwargs)
if isinstance(result, DataFrame) and result.columns.equals(
self._obj_with_exclusions.columns
):
return self._transform_fast(result)
return self._transform_general(func, *args, **kwargs)
def _transform_fast(self, result: DataFrame) -> DataFrame:
"""
Fast transform path for aggregations
"""
obj = self._obj_with_exclusions
# for each col, reshape to size of original frame by take operation
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
output = [
algorithms.take_1d(result.iloc[:, i].values, ids)
for i, _ in enumerate(result.columns)
]
return self.obj._constructor._from_arrays(
output, columns=result.columns, index=obj.index
)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
except AssertionError:
raise
except Exception:
# GH#29631 For user-defined function, we can't predict what may be
# raised; see test_transform.test_transform_fastpath_raises
return path, res
# verify fast path does not change columns (and names), otherwise
# its results cannot be joined with those of the slow path
if not isinstance(res_fast, DataFrame):
return path, res
if not res_fast.columns.equals(group.columns):
return path, res
if res_fast.equals(res):
path = fast_path
return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
pass
else:
inds.append(i)
if not output:
raise TypeError("Transform function invalid for data types")
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return self.obj._constructor(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding filtered elements.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
If False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
f"filter function returned a {type(res).__name__}, "
"but expected a scalar bool"
)
return self._apply_filter(indices, dropna)
def __getitem__(self, key):
if self.axis == 1:
# GH 37725
raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
# valid syntax, so don't raise warning
warnings.warn(
"Indexing with multiple keys (implicitly converted to a tuple "
"of keys) will be deprecated, use a list instead.",
FutureWarning,
stacklevel=2,
)
return super().__getitem__(key)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
axis=self.axis,
level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
selection=key,
as_index=self.as_index,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
mutated=self.mutated,
dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset,
level=self.level,
grouper=self.grouper,
selection=key,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_frame_output(self, result, obj: DataFrame) -> DataFrame:
result_index = self.grouper.levels[0]
if self.axis == 0:
return self.obj._constructor(
result, index=obj.columns, columns=result_index
).T
else:
return self.obj._constructor(result, index=obj.index, columns=result_index)
def _get_data_to_aggregate(self) -> BlockManager:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy aggregations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
result.index = self.grouper.result_index
if self.axis == 1:
result = result.T
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]]
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy transformations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
result = self.obj._constructor(indexed_output)
if self.axis == 1:
result = result.T
result.columns = self.obj.columns
else:
columns = Index(key.label for key in output)
columns.name = self.obj.columns.name
result.columns = columns
result.index = self.obj.index
return result
def _wrap_agged_blocks(self, blocks: Sequence[Block], items: Index) -> DataFrame:
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
mgr = BlockManager(blocks, axes=[items, index])
result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, axes=[items, index])
result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(
self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func) -> DataFrame:
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()),
keys=self._selected_obj.columns,
axis=1,
)
def count(self) -> DataFrame:
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
"""
data = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(2DEA): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1)
return counted
new_mgr = data.apply(hfunc)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_blocks() returns. GH 35028
with com.temp_setattr(self, "observed", True):
result = self._wrap_agged_blocks(new_mgr.blocks, items=data.items)
return self._reindex_output(result, fill_value=0)
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
from pandas.core.reshape.concat import concat
# TODO: this is duplicative of how GroupBy naturally works
# Try to consolidate with normal wrapping functions
obj = self._obj_with_exclusions
axis_number = obj._get_axis_number(self.axis)
other_axis = int(not axis_number)
if axis_number == 0:
iter_func = obj.items
else:
iter_func = obj.iterrows
results = concat(
[
SeriesGroupBy(content, selection=label, grouper=self.grouper).nunique(
dropna
)
for label, content in iter_func()
],
axis=1,
)
results = cast(DataFrame, results)
if axis_number == 1:
results = results.T
results._get_axis(other_axis).names = obj._get_axis(other_axis).names
if not self.as_index:
results.index = ibase.default_index(len(results))
self._insert_inaxis_grouper_inplace(results)
return results
@Appender(DataFrame.idxmax.__doc__)
def idxmax(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
@Appender(DataFrame.idxmin.__doc__)
def idxmin(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
boxplot = boxplot_frame_groupby
|
the-stack_106_30343 | #Import functions from the 'article_duplicates_modules.py'
import os
from article_duplicates_modules import minimize_difference, deduplicator
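# NOTE: `textlist4` is assumed to already exist (produced by earlier cleaning steps of
# this pipeline); it is not defined in this file.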
textlist5 = minimize_difference(textlist4)
textlist6 = deduplicator(textlist5)
#Print output into a file
#Since new line characters are cleaned from the articles, they are one long string of text
#This format will be useful in a later step
os.chdir('..') #Do not use 'Prescribed1'
article_duplicates_output = open("article_duplicates_output.txt", "w+")
print("\n\n".join(textlist6), file = article_duplicates_output)
article_duplicates_output.close() |
the-stack_106_30344 | # from pathlib import Path
import json, pdb, os, numpy as np, cv2, threading, math, collections.abc  # random
# import pickle, sys, itertools, string, sys, re, datetime, time, shutil, copy
from urllib.request import urlopen
# from tempfile import NamedTemporaryFile
import torch
from torch import nn, cuda, backends, FloatTensor, LongTensor, optim
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.model_zoo import load_url
from model.SigNet import main
from cocoapp import app
cats = {
1: 'ground',
2: 'coconut_tree'
}
id2cat = list(cats.values())
sz = 224
def open_image(fn):
""" Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
"""
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn):
raise OSError('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn):
raise OSError('Is a directory: {}'.format(fn))
else:
#res = np.array(Image.open(fn), dtype=np.float32)/255
#if len(res.shape)==2: res = np.repeat(res[...,None],3,2)
#return res
try:
            im = cv2.imread(str(fn), flags)
            if im is None: raise OSError(f'File not recognized by opencv: {fn}')
            im = im.astype(np.float32)/255
            return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except Exception as e:
raise OSError('Error handling image at: {}'.format(fn)) from e
# getting val_tfms to work without fastai import
from enum import IntEnum
class TfmType(IntEnum):
""" Type of transformation.
Parameters
IntEnum: predefined types of transformations
NO: the default, y does not get transformed when x is transformed.
PIXEL: x and y are images and should be transformed in the same way.
Example: image segmentation.
COORD: y are coordinates (i.e bounding boxes)
CLASS: y are class labels (same behaviour as PIXEL, except no normalization)
"""
NO = 1
PIXEL = 2
COORD = 3
CLASS = 4
class CropType(IntEnum):
""" Type of image cropping.
"""
RANDOM = 1
CENTER = 2
NO = 3
GOOGLENET = 4
class ChannelOrder():
'''
changes image array shape from (h, w, 3) to (3, h, w).
tfm_y decides the transformation done to the y element.
'''
def __init__(self, tfm_y=TfmType.NO): self.tfm_y=tfm_y
def __call__(self, x, y):
x = np.rollaxis(x, 2)
#if isinstance(y,np.ndarray) and (len(y.shape)==3):
if self.tfm_y==TfmType.PIXEL: y = np.rollaxis(y, 2)
elif self.tfm_y==TfmType.CLASS: y = y[...,0]
return x,y
class Transforms():
def __init__(self, sz, tfms, normalizer, denorm, crop_type=CropType.CENTER,
tfm_y=TfmType.NO, sz_y=None):
if sz_y is None: sz_y = sz
self.sz,self.denorm,self.norm,self.sz_y = sz,denorm,normalizer,sz_y
crop_tfm = crop_fn_lu[crop_type](sz, tfm_y, sz_y)
self.tfms = tfms + [crop_tfm, normalizer, ChannelOrder(tfm_y)]
def __call__(self, im, y=None): return compose(im, y, self.tfms)
def __repr__(self): return str(self.tfms)
def A(*a): return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]
class Denormalize():
""" De-normalizes an image, returning it to original format.
"""
def __init__(self, m, s):
self.m=np.array(m, dtype=np.float32)
self.s=np.array(s, dtype=np.float32)
def __call__(self, x): return x*self.s+self.m
class Normalize():
""" Normalizes an image to zero mean and unit standard deviation, given the mean m and std s of the original image """
def __init__(self, m, s, tfm_y=TfmType.NO):
self.m=np.array(m, dtype=np.float32)
self.s=np.array(s, dtype=np.float32)
self.tfm_y=tfm_y
def __call__(self, x, y=None):
x = (x-self.m)/self.s
if self.tfm_y==TfmType.PIXEL and y is not None: y = (y-self.m)/self.s
return x,y
class Transform():
""" A class that represents a transform.
All other transforms should subclass it. All subclasses should override
do_transform.
Arguments
---------
tfm_y : TfmType
type of transform
"""
def __init__(self, tfm_y=TfmType.NO):
self.tfm_y=tfm_y
self.store = threading.local()
def set_state(self): pass
def __call__(self, x, y):
self.set_state()
x,y = ((self.transform(x),y) if self.tfm_y==TfmType.NO
else self.transform(x,y) if self.tfm_y in (TfmType.PIXEL, TfmType.CLASS)
else self.transform_coord(x,y))
return x, y
def transform_coord(self, x, y): return self.transform(x),y
def transform(self, x, y=None):
x = self.do_transform(x,False)
return (x, self.do_transform(y,True)) if y is not None else x
# @abstractmethod
# def do_transform(self, x, is_y): raise NotImplementedError
class CoordTransform(Transform):
""" A coordinate transform. """
@staticmethod
def make_square(y, x):
r,c,*_ = x.shape
y1 = np.zeros((r, c))
        y = y.astype(int)
y1[y[0]:y[2], y[1]:y[3]] = 1.
return y1
def map_y(self, y0, x):
y = CoordTransform.make_square(y0, x)
y_tr = self.do_transform(y, True)
return to_bb(y_tr, y)
def transform_coord(self, x, ys):
yp = partition(ys, 4)
y2 = [self.map_y(y,x) for y in yp]
x = self.do_transform(x, False)
return x, np.concatenate(y2)
class Scale(CoordTransform):
""" A transformation that scales the min size to sz.
Arguments:
sz: int
target size to scale minimum size.
tfm_y: TfmType
type of y transformation.
"""
def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
super().__init__(tfm_y)
self.sz,self.sz_y = sz,sz_y
def do_transform(self, x, is_y):
if is_y: return scale_min(x, self.sz_y, cv2.INTER_NEAREST)
else : return scale_min(x, self.sz, cv2.INTER_AREA )
class NoCrop(CoordTransform):
""" A transformation that resize to a square image without cropping.
This transforms (optionally) resizes x,y at with the same parameters.
Arguments:
targ: int
target size of the crop.
tfm_y (TfmType): type of y transformation.
"""
def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
super().__init__(tfm_y)
self.sz,self.sz_y = sz,sz_y
def do_transform(self, x, is_y):
if is_y: return no_crop(x, self.sz_y, cv2.INTER_NEAREST)
else : return no_crop(x, self.sz, cv2.INTER_AREA )
imagenet_stats = A([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
stats = imagenet_stats
tfm_norm = Normalize(*stats, TfmType.NO)
tfm_denorm = Denormalize(*stats)
def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None,
tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT):
"""
Generate a standard set of transformations
Arguments
---------
normalizer :
image normalizing function
denorm :
image denormalizing function
sz :
size, sz_y = sz if not specified.
tfms :
iterable collection of transformation functions
max_zoom : float,
maximum zoom
pad : int,
padding on top, left, right and bottom
crop_type :
crop type
tfm_y :
y axis specific transformations
sz_y :
y size, height
pad_mode :
cv2 padding style: repeat, reflect, etc.
Returns
-------
type : ``Transforms``
transformer for specified image operations.
See Also
--------
Transforms: the transformer object returned by this function
"""
if tfm_y is None: tfm_y=TfmType.NO
if tfms is None: tfms=[]
    elif not isinstance(tfms, collections.abc.Iterable): tfms=[tfms]
if sz_y is None: sz_y = sz
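    # Note: RandomScale and AddPadding are not defined in this trimmed module; they are
    # only reached when max_zoom or pad is set, and the inference call below uses neither.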
scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None
else Scale(sz, tfm_y, sz_y=sz_y)]
if pad: scale.append(AddPadding(pad, mode=pad_mode))
if crop_type!=CropType.GOOGLENET: tfms=scale+tfms
return Transforms(sz, tfms, normalizer, denorm, crop_type,
tfm_y=tfm_y, sz_y=sz_y)
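# --- Added usage sketch (hedged): building a validation-style pipeline with the
# helpers above. `Transforms`/`CropType` come from elsewhere in the original
# project, 224 is an arbitrary size and the file path is a placeholder.
# val_tfms = image_gen(tfm_norm, tfm_denorm, sz=224, crop_type=CropType.NO)
# img = cv2.imread('example.jpg')[:, :, ::-1].astype(np.float32) / 255
# x = val_tfms(img)  # resized to 224x224 and normalized with imagenet_stats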
crop_fn_lu = {CropType.NO: NoCrop}
def compose(im, y, fns):
""" apply a collection of transformation functions fns to images
"""
for fn in fns:
#pdb.set_trace()
im, y =fn(im, y)
return im if y is None else (im, y)
def scale_min(im, targ, interpolation=cv2.INTER_AREA):
""" Scales the image so that the smallest axis is of size targ.
Arguments:
im (array): image
targ (int): target size
"""
r,c,*_ = im.shape
ratio = targ/min(r,c)
sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
return cv2.resize(im, sz, interpolation=interpolation)
def scale_to(x, ratio, targ):
'''
Returns the scaled size of dimension x under ratio, clamped to be at least targ.
'''
return max(math.floor(x*ratio), targ)
def crop(im, r, c, sz):
'''
crop image to a square of size sz, starting at row r, column c.
'''
return im[r:r+sz, c:c+sz]
def no_crop(im, min_sz=None, interpolation=cv2.INTER_AREA):
""" Returns a squared resized image """
r,c,*_ = im.shape
if min_sz is None: min_sz = min(r,c)
return cv2.resize(im, (min_sz, min_sz), interpolation=interpolation)
# -------- end val_tfms stuff
def preproc_img(img):
val_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=0, crop_type=CropType.NO, tfm_y=None, sz_y=None)
trans_img = val_tfm(img)
return Variable(torch.FloatTensor(trans_img)).unsqueeze_(0)
def gen_anchors(anc_grids, anc_zooms, anc_ratios):
anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]
k = len(anchor_scales)
anc_offsets = [1/(o*2) for o in anc_grids]
anc_x = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag) for ao,ag in zip(anc_offsets,anc_grids)])
anc_y = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag) for ao,ag in zip(anc_offsets,anc_grids)])
anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)
anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales]) for ag in anc_grids])
grid_sizes_np = np.concatenate([np.array([ 1/ag for i in range(ag*ag) for o,p in anchor_scales]) for ag in anc_grids])
anchors_np = np.concatenate([anc_ctrs, anc_sizes], axis=1)
anchors = Variable(torch.FloatTensor(anchors_np))
grid_sizes = Variable(torch.FloatTensor(grid_sizes_np)).unsqueeze(1)
return anchors, grid_sizes
#gen ancs
anc_grids = [28,14,7,4,2]
anc_zooms = [2**(0/3),2**(1/3),2**(2/3)]
anc_ratios = [(1.,1.), (.5,1.), (1.,.5)]
anchors, grid_sizes = gen_anchors(anc_grids, anc_zooms, anc_ratios)
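# --- Added sanity check (not in the original code): with 3 zooms * 3 ratios
# there are k = 9 anchor shapes per grid cell, so the total anchor count is
# 9 * (28^2 + 14^2 + 7^2 + 4^2 + 2^2) = 9 * 1049 = 9441.
assert tuple(anchors.size()) == (9441, 4)
assert tuple(grid_sizes.size()) == (9441, 1)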
def hw2corners(ctr, hw): return torch.cat([ctr-hw/2, ctr+hw/2], dim=1)
def actn_to_bb(actn, anchors, grid_sizes):
actn_bbs = torch.tanh(actn)
actn_centers = (actn_bbs[:,:2]/2 * grid_sizes) + anchors[:,:2]
actn_hw = (actn_bbs[:,2:]/2+1) * anchors[:,2:]
return hw2corners(actn_centers, actn_hw)
def to_np(v):
if isinstance(v, (np.ndarray, np.generic)): return v
if isinstance(v, (list,tuple)): return [to_np(o) for o in v]
if isinstance(v, Variable): v=v.data
if isinstance(v, torch.cuda.HalfTensor): v=v.float()
return v.cpu().numpy()
def pred2dict(bb_np,score,cat_str):
# convert to top left x,y bottom right x,y
return {"x1": bb_np[1],
"x2": bb_np[3],
"y1": bb_np[0],
"y2": bb_np[2],
"score": score,
"category": cat_str}
# non max suppression
def nms(boxes, scores, overlap=0.5, top_k=100):
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0: return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
keep[count] = i
count += 1
if idx.size(0) == 1: break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
rem_areas = torch.index_select(area, 0, idx) # load remaining areas
union = (rem_areas - inter) + area[i]
IoU = inter/union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
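# --- Added smoke test (hedged; assumes the same PyTorch version the rest of
# this file targets). Two heavily overlapping boxes should collapse into one
# detection while the distant third box is kept.
def _nms_smoke_test():
    boxes = torch.FloatTensor([[0., 0., 10., 10.],
                               [1., 1., 10., 10.],    # IoU with box 0 is 0.81
                               [20., 20., 30., 30.]])
    scores = torch.FloatTensor([0.9, 0.8, 0.7])
    keep, count = nms(boxes, scores, overlap=0.5, top_k=100)
    return keep[:count]  # expected: indices 0 and 2, highest score first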
def nms_preds(a_ic, p_cl, cl):
nms_bb, nms_pr, nms_id = [],[],[]
conf_scores = p_cl.sigmoid()[0].t().data
boxes = a_ic.view(-1, 4)
scores = conf_scores[cl]
if len(scores)>0:
ids, count = nms(boxes.data, scores, 0.4, 50)
ids = ids[:count]
nms_pr.append(scores[ids])
nms_bb.append(boxes.data[ids])
nms_id.append([cl]*count)
else: nms_bb, nms_pr, nms_id = [[-1.,-1.,-1.,-1.,]],[[-1]],[[-1]]
# return in order: boxes (a_ic), class probabilities, class ids
return Variable(torch.FloatTensor(nms_bb[0])), Variable(torch.FloatTensor(nms_pr[0])), np.asarray(nms_id[0])
def get_predictions(img, nms=True):
img_t = preproc_img(img)
model = load_model()
#make predictions
p_cl, p_bb = model(img_t)
#convert bb and clas
a_ic = actn_to_bb(p_bb[0], anchors, grid_sizes)
clas_pr, clas_ids = p_cl[0].max(1)
clas_pr = clas_pr.sigmoid()
clas_ids = to_np(clas_ids)
#non max suppression (optional)
#cl = 1 hardcoded for now, bug with cl=0 to be fixed
if nms: a_ic, clas_pr, clas_ids = nms_preds(a_ic, p_cl, 1)
preds = []
for i,a in enumerate(a_ic):
cat_str = 'bg' if clas_ids[i]==len(id2cat) else id2cat[clas_ids[i]]
score = to_np(clas_pr[i])[0].astype('float64')*100
bb_np = to_np(a).astype('float64')
preds.append(pred2dict(bb_np,score,cat_str))
return {
"bboxes": preds
}
def get_predictions1():
main('dataset')
def load_model():
dst = app.config['MODEL_FILE']
# model = torch.load(dst)
if os.path.isfile(dst):
model = torch.load(dst)
else:
dl_url = 'https://www.dropbox.com/s/e1gnf7oj7qdctlw/cocomodel_0502.pt?dl=1'
with urlopen(dl_url) as u, NamedTemporaryFile(delete=False) as f:
f.write(u.read())
shutil.move(f.name, dst)
model = torch.load(dst)
return model
|
the-stack_106_30347 | import argparse
import importlib
import io
import json
import pathlib
import sys
import time
from clvm import to_sexp_f, KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM, SExp
from clvm.EvalError import EvalError
from clvm.serialize import sexp_from_stream, sexp_to_stream
from clvm.operators import OP_REWRITE
from ir import reader
from . import binutils
from .debug import make_trace_pre_eval, trace_to_text, trace_to_table
from .sha256tree import sha256tree
try:
from clvm_rs import deserialize_and_run_program2, STRICT_MODE
except ImportError:
deserialize_and_run_program2 = None
def path_or_code(arg):
try:
with open(arg) as f:
return f.read()
except IOError:
return arg
def stream_to_bin(write_f):
b = io.BytesIO()
write_f(b)
return b.getvalue()
def call_tool(tool_name, desc, conversion, input_args):
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"-H", "--script-hash", action="store_true", help="Show only sha256 tree hash of program"
)
parser.add_argument(
"path_or_code",
nargs="*",
type=path_or_code,
help="path to clvm script, or literal script",
)
sys.setrecursionlimit(20000)
args = parser.parse_args(args=input_args[1:])
for program in args.path_or_code:
if program == "-":
program = sys.stdin.read()
sexp, text = conversion(program)
if args.script_hash:
print(sha256tree(sexp).hex())
elif text:
print(text)
def opc(args=sys.argv):
def conversion(text):
try:
ir_sexp = reader.read_ir(text)
sexp = binutils.assemble_from_ir(ir_sexp)
except SyntaxError as ex:
print("%s" % ex.msg)
return None, None
return sexp, sexp.as_bin().hex()
call_tool("opc", "Compile a clvm script.", conversion, args)
def opd(args=sys.argv):
def conversion(blob):
sexp = sexp_from_stream(io.BytesIO(bytes.fromhex(blob)), to_sexp_f)
return sexp, binutils.disassemble(sexp)
call_tool("opd", "Disassemble a compiled clvm script from hex.", conversion, args)
def stage_import(stage):
stage_path = "stages.stage_%s" % stage
try:
return importlib.import_module(stage_path)
except ImportError:
raise ValueError("bad stage: %s" % stage)
def as_bin(streamer_f):
f = io.BytesIO()
streamer_f(f)
return f.getvalue()
def run(args=sys.argv):
return launch_tool(args, "run", default_stage=2)
def brun(args=sys.argv):
return launch_tool(args, "brun")
def calculate_cost_offset(run_program, run_script: SExp):
"""
These commands are used by the test suite, and many of them expect certain costs.
If boilerplate invocation code changes by a fixed cost, you can tweak this
value so you don't have to change all the tests' expected costs.
Eventually you should re-tare this to zero and alter the tests' costs though.
This is a hack and needs to go away, probably when we do dialects for real,
and then the dialect can have a `run_program` API.
"""
null = binutils.assemble("0")
cost, _r = run_program(run_script, null.cons(null))
return 53 - cost
def launch_tool(args, tool_name, default_stage=0):
sys.setrecursionlimit(20000)
parser = argparse.ArgumentParser(
description='Execute a clvm script.'
)
parser.add_argument(
"--strict", action="store_true",
help="Unknown opcodes are always fatal errors in strict mode")
parser.add_argument(
"-x", "--hex", action="store_true",
help="Read program and environment as hexadecimal bytecode")
parser.add_argument(
"-s", "--stage", type=stage_import,
help="stage number to include", default=stage_import(default_stage))
parser.add_argument(
"-v", "--verbose", action="store_true",
help="Display resolve of all reductions, for debugging")
parser.add_argument(
"-t", "--table", action="store_true",
help="Print diagnostic table of reductions, for debugging")
parser.add_argument(
"-c", "--cost", action="store_true", help="Show cost")
parser.add_argument(
"--time", action="store_true", help="Print execution time")
parser.add_argument(
"-m", "--max-cost", type=int, default=11000000000, help="Maximum cost")
parser.add_argument(
"-d", "--dump", action="store_true",
help="dump hex version of final output")
parser.add_argument(
"--quiet", action="store_true", help="Suppress printing the program result")
parser.add_argument(
"-y", "--symbol-table", type=pathlib.Path,
help=".SYM file generated by compiler")
parser.add_argument(
"-n", "--no-keywords", action="store_true",
help="Output result as data, not as a program")
parser.add_argument("--backend", type=str, help="force use of 'rust' or 'python' backend")
parser.add_argument(
"-i",
"--include",
type=pathlib.Path,
help="add a search path for included files",
action="append",
default=[],
)
parser.add_argument(
"path_or_code", type=path_or_code,
help="filepath to clvm script, or a literal script")
parser.add_argument(
"env", nargs="?", type=path_or_code,
help="clvm script environment, as clvm src, or hex")
args = parser.parse_args(args=args[1:])
keywords = {} if args.no_keywords else KEYWORD_FROM_ATOM
if hasattr(args.stage, "run_program_for_search_paths"):
run_program = args.stage.run_program_for_search_paths(args.include)
else:
run_program = args.stage.run_program
input_serialized = None
input_sexp = None
time_start = time.perf_counter()
if args.hex:
assembled_serialized = bytes.fromhex(args.path_or_code)
if not args.env:
args.env = "80"
env_serialized = bytes.fromhex(args.env)
time_read_hex = time.perf_counter()
input_serialized = b"\xff" + assembled_serialized + env_serialized
else:
src_text = args.path_or_code
try:
src_sexp = reader.read_ir(src_text)
except SyntaxError as ex:
print("FAIL: %s" % (ex))
return -1
assembled_sexp = binutils.assemble_from_ir(src_sexp)
if not args.env:
args.env = "()"
env_ir = reader.read_ir(args.env)
env = binutils.assemble_from_ir(env_ir)
time_assemble = time.perf_counter()
input_sexp = to_sexp_f((assembled_sexp, env))
pre_eval_f = None
symbol_table = None
log_entries = []
if args.symbol_table:
with open(args.symbol_table) as f:
symbol_table = json.load(f)
pre_eval_f = make_trace_pre_eval(log_entries, symbol_table)
elif args.verbose or args.table:
pre_eval_f = make_trace_pre_eval(log_entries)
run_script = getattr(args.stage, tool_name)
cost = 0
cost_offset = calculate_cost_offset(run_program, run_script)
try:
output = "(didn't finish)"
use_rust = (
(tool_name != "run")
and not pre_eval_f
and (
args.backend == "rust"
or (deserialize_and_run_program2 and args.backend != "python")
)
)
max_cost = max(0, args.max_cost - cost_offset if args.max_cost != 0 else 0)
if use_rust:
if input_serialized is None:
input_serialized = input_sexp.as_bin()
run_script = run_script.as_bin()
time_parse_input = time.perf_counter()
# build the opcode look-up table
# this should eventually be subsumed by "Dialect" api
native_opcode_names_by_opcode = dict(
("op_%s" % OP_REWRITE.get(k, k), op)
for op, k in KEYWORD_FROM_ATOM.items()
if k not in "qa."
)
cost, result = deserialize_and_run_program2(
run_script,
input_serialized,
KEYWORD_TO_ATOM["q"][0],
KEYWORD_TO_ATOM["a"][0],
native_opcode_names_by_opcode,
max_cost,
STRICT_MODE if args.strict else 0,
)
time_done = time.perf_counter()
result = SExp.to(result)
else:
if input_sexp is None:
input_sexp = sexp_from_stream(io.BytesIO(input_serialized), to_sexp_f)
time_parse_input = time.perf_counter()
cost, result = run_program(
run_script, input_sexp, max_cost=max_cost, pre_eval_f=pre_eval_f, strict=args.strict)
time_done = time.perf_counter()
if args.cost:
cost += cost_offset if cost > 0 else 0
print("cost = %d" % cost)
if args.time:
if args.hex:
print('read_hex: %f' % (time_read_hex - time_start))
else:
print('assemble_from_ir: %f' % (time_assemble - time_start))
print('to_sexp_f: %f' % (time_parse_input - time_assemble))
print('run_program: %f' % (time_done - time_parse_input))
if args.dump:
blob = as_bin(lambda f: sexp_to_stream(result, f))
output = blob.hex()
elif args.quiet:
output = ''
else:
output = binutils.disassemble(result, keywords)
except EvalError as ex:
result = to_sexp_f(ex._sexp)
output = "FAIL: %s %s" % (ex, binutils.disassemble(result, keywords))
return -1
except Exception as ex:
output = str(ex)
raise
finally:
print(output)
if args.verbose or symbol_table:
print()
trace_to_text(log_entries, binutils.disassemble, symbol_table)
if args.table:
trace_to_table(log_entries, binutils.disassemble, symbol_table)
def read_ir(args=sys.argv):
parser = argparse.ArgumentParser(
description='Read script and tokenize to IR.'
)
parser.add_argument(
"script", help="script in hex or uncompiled text")
args = parser.parse_args(args=args[1:])
sexp = reader.read_ir(args.script)
blob = stream_to_bin(lambda f: sexp_to_stream(sexp, f))
print(blob.hex())
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
the-stack_106_30348 | """
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rER[P/F]s - regressing the continuous data - are a
generalisation of traditional averaging. If all preprocessing steps
are the same, no overlap between epochs exists, and if all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate effects, including those of
continuous predictors.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <[email protected]>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression_raw
# Load and preprocess data
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg='grad', stim=True, eeg=False).load_data()
raw.filter(1, None, fir_design='firwin') # high-pass
# Set up events
events = mne.find_events(raw)
event_id = {'Aud/L': 1, 'Aud/R': 2}
tmin, tmax = -.1, .5
# regular epoching
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
reject=None, tmin=tmin, tmax=tmax)
# linear_regression_raw returns a dict of evokeds
# select conditions similarly to mne.Epochs objects
# plot both results, and their difference
cond = "Aud/L"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)),
time_unit='s')
epochs[cond].average().plot(axes=ax1, **params)
evokeds[cond].plot(axes=ax2, **params)
contrast = mne.combine_evoked([evokeds[cond], epochs[cond].average()],
weights=[1, -1])
contrast.plot(axes=ax3, **params)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
ax3.set_title("Difference")
plt.show()
|
the-stack_106_30350 | import re
import sys
import string
from toolz import pluck
from brocclib.get_xml import get_lineage
from brocclib.taxonomy import Lineage
fields = lambda f: iter(map(string.strip, l.split('\t')) for l in f)
isnumeric = re.compile(r'\d+').match
standard = lambda d: ";".join( Lineage(d).get_standard_taxa("species") )
full = lambda d: ";".join( Lineage(d).get_all_taxa("species") )
def get_lin(line):
if all(( type(line) in (tuple, list),
len(line) >=5,
isnumeric(line[-1]) )):
return get_lineage(line[-1])
def main(out_standard, out_full, cont_=None):
with open(out_standard, 'w') as out_s, open(out_full, 'w') as out_f:
fs = fields(sys.stdin)
for line in fs:
if cont_ is not None and line[-1] != cont_:
continue
elif cont_ is not None and line[-1] == cont_:
cont_ = None
try:
lineage = get_lin(line)
print >> out_s, "\t".join(line + [standard(lineage)])
print >> out_f, "\t".join(line + [full(lineage)])
except Exception as e:
print >> sys.stderr, "Couldn't get %s: %s"%(line[0], str(e))
continue
print >> sys.stderr, "finished %s"%(line[0])
if __name__ == "__main__":
ret = main(*sys.argv[1:])
sys.exit(ret)
|
the-stack_106_30351 | from tracardi_dot_notation.dot_accessor import DotAccessor
from tracardi.process_engine.tql.transformer.transformer_namespace import TransformerNamespace
from lark import v_args, Token
@v_args(inline=True)
class CalcTransformer(TransformerNamespace):
from operator import add, sub, mul, truediv as div, neg
number = float
def __init__(self, dot, *args, **kwargs):
if not isinstance(dot, DotAccessor):
raise ValueError("Data passed to ExprTransformer must be type of DotAccessor.")
super().__init__(*args, **kwargs)
# self.namespace('uql_function__', FunctionTransformer())
self._dot = dot
self.vars = {}
def assign_var(self, *args):
token, value = args
if token.type == "NAME":
self.vars[token.value] = value
elif token.type == "FIELD":
self._dot[token.value] = value
return value
def field(self, field):
value = self._dot[field]
if isinstance(value, str):
try:
value = float(value)
except Exception:
raise ValueError(f"Field `{field}` is a string. System tried to parse it into number, but it failed.")
if isinstance(value, float) or isinstance(value, int):
return value
raise ValueError(f"Field `{field}` is not a number it is a `{type(field)}`.")
def var(self, name):
try:
return self.vars[name]
except KeyError:
raise Exception(f"Variable `{name}` not found")
|
the-stack_106_30352 | import py
import pytest
from xdist.workermanage import NodeManager
from xdist.scheduler import (
EachScheduling,
LoadScheduling,
LoadScopeScheduling,
LoadFileScheduling,
)
from six.moves.queue import Empty, Queue
class Interrupted(KeyboardInterrupt):
""" signals an immediate interruption. """
class DSession(object):
"""A pytest plugin which runs a distributed test session
At the beginning of the test session this creates a NodeManager
instance which creates and starts all nodes. Nodes then emit
events processed in the pytest_runtestloop hook using the worker_*
methods.
Once a node is started it will automatically start running the
pytest mainloop with some custom hooks. This means a node
automatically starts collecting tests. Once tests are collected
it will wait for instructions.
"""
def __init__(self, config):
self.config = config
self.log = py.log.Producer("dsession")
if not config.option.debug:
py.log.setconsumer(self.log._keywords, None)
self.nodemanager = None
self.sched = None
self.shuttingdown = False
self.countfailures = 0
self.maxfail = config.getvalue("maxfail")
self.queue = Queue()
self._session = None
self._failed_collection_errors = {}
self._active_nodes = set()
self._failed_nodes_count = 0
self._max_worker_restart = get_default_max_worker_restart(self.config)
# summary message to print at the end of the session
self._summary_report = None
self.terminal = config.pluginmanager.getplugin("terminalreporter")
if self.terminal:
self.trdist = TerminalDistReporter(config)
config.pluginmanager.register(self.trdist, "terminaldistreporter")
@property
def session_finished(self):
"""Return True if the distributed session has finished
This means all nodes have executed all test items. This is
used by pytest_runtestloop to break out of its loop.
"""
return bool(self.shuttingdown and not self._active_nodes)
def report_line(self, line):
if self.terminal and self.config.option.verbose >= 0:
self.terminal.write_line(line)
@pytest.mark.trylast
def pytest_sessionstart(self, session):
"""Creates and starts the nodes.
The nodes are setup to put their events onto self.queue. As
soon as nodes start they will emit the worker_workerready event.
"""
self.nodemanager = NodeManager(self.config)
nodes = self.nodemanager.setup_nodes(putevent=self.queue.put)
self._active_nodes.update(nodes)
self._session = session
def pytest_sessionfinish(self, session):
"""Shutdown all nodes."""
nm = getattr(self, "nodemanager", None) # if not fully initialized
if nm is not None:
nm.teardown_nodes()
self._session = None
def pytest_collection(self):
# prohibit collection of test items in master process
return True
@pytest.mark.trylast
def pytest_xdist_make_scheduler(self, config, log):
dist = config.getvalue("dist")
schedulers = {
"each": EachScheduling,
"load": LoadScheduling,
"loadscope": LoadScopeScheduling,
"loadfile": LoadFileScheduling,
}
return schedulers[dist](config, log)
def pytest_runtestloop(self):
self.sched = self.config.hook.pytest_xdist_make_scheduler(
config=self.config, log=self.log
)
assert self.sched is not None
self.shouldstop = False
while not self.session_finished:
self.loop_once()
if self.shouldstop:
self.triggershutdown()
raise Interrupted(str(self.shouldstop))
return True
def loop_once(self):
"""Process one callback from one of the workers."""
while 1:
if not self._active_nodes:
# If everything has died stop looping
self.triggershutdown()
raise RuntimeError("Unexpectedly no active workers available")
try:
eventcall = self.queue.get(timeout=2.0)
break
except Empty:
continue
callname, kwargs = eventcall
assert callname, kwargs
method = "worker_" + callname
call = getattr(self, method)
self.log("calling method", method, kwargs)
call(**kwargs)
if self.sched.tests_finished:
self.triggershutdown()
#
# callbacks for processing events from workers
#
def worker_workerready(self, node, workerinfo):
"""Emitted when a node first starts up.
This adds the node to the scheduler, nodes continue with
collection without any further input.
"""
node.workerinfo = workerinfo
node.workerinfo["id"] = node.gateway.id
node.workerinfo["spec"] = node.gateway.spec
# TODO: (#234 task) needs this for pytest. Remove when refactor in pytest repo
node.slaveinfo = node.workerinfo
self.config.hook.pytest_testnodeready(node=node)
if self.shuttingdown:
node.shutdown()
else:
self.sched.add_node(node)
def worker_workerfinished(self, node):
"""Emitted when node executes its pytest_sessionfinish hook.
Removes the node from the scheduler.
The node might not be in the scheduler if it had not emitted
workerready before shutdown was triggered.
"""
self.config.hook.pytest_testnodedown(node=node, error=None)
if node.workeroutput["exitstatus"] == 2: # keyboard-interrupt
self.shouldstop = "%s received keyboard-interrupt" % (node,)
self.worker_errordown(node, "keyboard-interrupt")
return
if node in self.sched.nodes:
crashitem = self.sched.remove_node(node)
assert not crashitem, (crashitem, node)
self._active_nodes.remove(node)
def worker_errordown(self, node, error):
"""Emitted by the WorkerController when a node dies."""
self.config.hook.pytest_testnodedown(node=node, error=error)
try:
crashitem = self.sched.remove_node(node)
except KeyError:
pass
else:
if crashitem:
self.handle_crashitem(crashitem, node)
self._failed_nodes_count += 1
maximum_reached = (
self._max_worker_restart is not None
and self._failed_nodes_count > self._max_worker_restart
)
if maximum_reached:
if self._max_worker_restart == 0:
msg = "worker {} crashed and worker restarting disabled".format(
node.gateway.id
)
else:
msg = "maximum crashed workers reached: %d" % self._max_worker_restart
self._summary_report = msg
self.report_line("\n" + msg)
self.triggershutdown()
else:
self.report_line("\nreplacing crashed worker %s" % node.gateway.id)
self._clone_node(node)
self._active_nodes.remove(node)
def pytest_terminal_summary(self, terminalreporter):
if self.config.option.verbose >= 0 and self._summary_report:
terminalreporter.write_sep("=", "xdist: {}".format(self._summary_report))
def worker_collectionfinish(self, node, ids):
"""worker has finished test collection.
This adds the collection for this node to the scheduler. If
the scheduler indicates collection is finished (i.e. all
initial nodes have submitted their collections), then tells the
scheduler to schedule the collected items. When initiating
scheduling the first time it logs which scheduler is in use.
"""
if self.shuttingdown:
return
self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids)
# tell session which items were effectively collected otherwise
# the master node will finish the session with EXIT_NOTESTSCOLLECTED
self._session.testscollected = len(ids)
self.sched.add_node_collection(node, ids)
if self.terminal:
self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
if self.sched.collection_is_completed:
if self.terminal and not self.sched.has_pending:
self.trdist.ensure_show_status()
self.terminal.write_line("")
if self.config.option.verbose > 0:
self.terminal.write_line(
"scheduling tests via %s" % (self.sched.__class__.__name__)
)
self.sched.schedule()
def worker_logstart(self, node, nodeid, location):
"""Emitted when a node calls the pytest_runtest_logstart hook."""
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def worker_logfinish(self, node, nodeid, location):
"""Emitted when a node calls the pytest_runtest_logfinish hook."""
self.config.hook.pytest_runtest_logfinish(nodeid=nodeid, location=location)
def worker_testreport(self, node, rep):
"""Emitted when a node calls the pytest_runtest_logreport hook."""
rep.node = node
self.config.hook.pytest_runtest_logreport(report=rep)
self._handlefailures(rep)
def worker_runtest_protocol_complete(self, node, item_index, duration):
"""
Emitted when a node fires the 'runtest_protocol_complete' event,
signalling that a test has completed the runtestprotocol and should be
removed from the pending list in the scheduler.
"""
self.sched.mark_test_complete(node, item_index, duration)
def worker_collectreport(self, node, rep):
"""Emitted when a node calls the pytest_collectreport hook.
Because we only need the report when there's a failure/skip, as optimization
we only expect to receive failed/skipped reports from workers (#330).
"""
assert not rep.passed
self._failed_worker_collectreport(node, rep)
def worker_logwarning(self, message, code, nodeid, fslocation):
"""Emitted when a node calls the pytest_logwarning hook."""
kwargs = dict(message=message, code=code, nodeid=nodeid, fslocation=fslocation)
self.config.hook.pytest_logwarning.call_historic(kwargs=kwargs)
def worker_warning_captured(self, warning_message, when, item):
"""Emitted when a node calls the pytest_logwarning hook."""
kwargs = dict(warning_message=warning_message, when=when, item=item)
self.config.hook.pytest_warning_captured.call_historic(kwargs=kwargs)
def _clone_node(self, node):
"""Return new node based on an existing one.
This is normally for when a node dies, this will copy the spec
of the existing node and create a new one with a new id. The
new node will have been setup so it will start calling the
"worker_*" hooks and do work soon.
"""
spec = node.gateway.spec
spec.id = None
self.nodemanager.group.allocate_id(spec)
node = self.nodemanager.setup_node(spec, self.queue.put)
self._active_nodes.add(node)
return node
def _failed_worker_collectreport(self, node, rep):
# Check we haven't already seen this report (from
# another worker).
if rep.longrepr not in self._failed_collection_errors:
self._failed_collection_errors[rep.longrepr] = True
self.config.hook.pytest_collectreport(report=rep)
self._handlefailures(rep)
def _handlefailures(self, rep):
if rep.failed:
self.countfailures += 1
if self.maxfail and self.countfailures >= self.maxfail:
self.shouldstop = "stopping after %d failures" % (self.countfailures)
def triggershutdown(self):
self.log("triggering shutdown")
self.shuttingdown = True
for node in self.sched.nodes:
node.shutdown()
def handle_crashitem(self, nodeid, worker):
# XXX get more reporting info by recording pytest_runtest_logstart?
# XXX count no of failures and retry N times
runner = self.config.pluginmanager.getplugin("runner")
fspath = nodeid.split("::")[0]
msg = "worker %r crashed while running %r" % (worker.gateway.id, nodeid)
rep = runner.TestReport(
nodeid, (fspath, None, fspath), (), "failed", msg, "???"
)
rep.node = worker
self.config.hook.pytest_runtest_logreport(report=rep)
class TerminalDistReporter(object):
def __init__(self, config):
self.config = config
self.tr = config.pluginmanager.getplugin("terminalreporter")
self._status = {}
self._lastlen = 0
self._isatty = getattr(self.tr, "isatty", self.tr.hasmarkup)
def write_line(self, msg):
self.tr.write_line(msg)
def ensure_show_status(self):
if not self._isatty:
self.write_line(self.getstatus())
def setstatus(self, spec, status, show=True):
self._status[spec.id] = status
if show and self._isatty:
self.rewrite(self.getstatus())
def getstatus(self):
if self.config.option.verbose >= 0:
parts = ["%s %s" % (spec.id, self._status[spec.id]) for spec in self._specs]
return " / ".join(parts)
else:
return "bringing up nodes..."
def rewrite(self, line, newline=False):
pline = line + " " * max(self._lastlen - len(line), 0)
if newline:
self._lastlen = 0
pline += "\n"
else:
self._lastlen = len(line)
self.tr.rewrite(pline, bold=True)
def pytest_xdist_setupnodes(self, specs):
self._specs = specs
for spec in specs:
self.setstatus(spec, "I", show=False)
self.setstatus(spec, "I", show=True)
self.ensure_show_status()
def pytest_xdist_newgateway(self, gateway):
if self.config.option.verbose > 0:
rinfo = gateway._rinfo()
version = "%s.%s.%s" % rinfo.version_info[:3]
self.rewrite(
"[%s] %s Python %s cwd: %s"
% (gateway.id, rinfo.platform, version, rinfo.cwd),
newline=True,
)
self.setstatus(gateway.spec, "C")
def pytest_testnodeready(self, node):
if self.config.option.verbose > 0:
d = node.workerinfo
infoline = "[%s] Python %s" % (d["id"], d["version"].replace("\n", " -- "))
self.rewrite(infoline, newline=True)
self.setstatus(node.gateway.spec, "ok")
def pytest_testnodedown(self, node, error):
if not error:
return
self.write_line("[%s] node down: %s" % (node.gateway.id, error))
def get_default_max_worker_restart(config):
"""gets the default value of --max-worker-restart option if it is not provided.
Use a reasonable default to avoid workers from restarting endlessly due to crashing collections (#226).
"""
result = config.option.maxworkerrestart
if result is not None:
result = int(result)
elif config.option.numprocesses:
# if --max-worker-restart was not provided, use a reasonable default (#226)
result = config.option.numprocesses * 4
return result
|
the-stack_106_30354 | # Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""System test: test binary"""
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import tensorflow as tf
import numpy as np
from test_runner import TfliteTestRunner
def _make_module(in_shape, v_shape):
class BinaryModule(tf.Module):
def __init__(self):
super(BinaryModule).__init__()
self.v = tf.constant(np.random.rand(*v_shape).astype(np.float32))
@tf.function(input_signature=[tf.TensorSpec(in_shape, tf.float32)])
def __call__(self, x):
outs = []
outs.append(x + self.v)
outs.append(x - self.v)
outs.append(x * self.v)
outs.append(self.v / (2.0 + x))
outs.append(tf.minimum(x, self.v))
outs.append(tf.maximum(x, self.v))
return outs
return BinaryModule()
lhs_shapes = [
[3],
[64, 3],
[3, 64, 3],
[8, 3, 64, 3]
]
rhs_shapes = [
[1],
[3],
[1, 3],
[64, 1],
[64, 3],
[3, 64, 1],
[3, 64, 3],
[8, 3, 64, 1],
[8, 3, 64, 3],
[8, 3, 1, 3],
[8, 1, 64, 3],
[1, 3, 64, 1]
]
@pytest.mark.parametrize('lhs_shape', lhs_shapes)
@pytest.mark.parametrize('rhs_shape', rhs_shapes)
def test_binary(lhs_shape, rhs_shape, request):
module = _make_module(lhs_shape, rhs_shape)
runner = TfliteTestRunner(request.node.name)
model_file = runner.from_tensorflow(module)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_binary.py'])
|
the-stack_106_30356 | import inspect
import json
import os
import random
import shutil
import tempfile
import weakref
from dataclasses import asdict
from functools import wraps
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
import xxhash
from datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable, Table
from .info import DatasetInfo
from .utils.logging import get_logger
from .utils.py_utils import dumps
if TYPE_CHECKING:
from .arrow_dataset import Dataset
logger = get_logger(__name__)
# Fingerprinting allows to have one deterministic fingerprint per dataset state.
# A dataset fingerprint is updated after each transform.
# Re-running the same transforms on a dataset in a different session results in the same fingerprint.
# This is possible thanks to a custom hashing function that works with most python objects.
# Fingerprinting is the main mechanism that enables caching.
# The caching mechanism allows reloading an existing cache file if it has already been computed.
#################
# Caching
#################
_CACHING_ENABLED = True
_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempDirWithCustomCleanup"] = None
_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
class _TempDirWithCustomCleanup:
"""
A temporary directory with a custom cleanup function.
We need a custom temporary directory cleanup in order to delete the dataset objects that have
cache files in the temporary directory before deleting the directory itself.
"""
def __init__(self, cleanup_func=None, *cleanup_func_args, **cleanup_func_kwargs):
self.name = tempfile.mkdtemp()
self._finalizer = weakref.finalize(self, self._cleanup)
self._cleanup_func = cleanup_func
self._cleanup_func_args = cleanup_func_args
self._cleanup_func_kwargs = cleanup_func_kwargs
def _cleanup(self):
self._cleanup_func(*self._cleanup_func_args, **self._cleanup_func_kwargs)
if os.path.exists(self.name):
shutil.rmtree(self.name)
def cleanup(self):
if self._finalizer.detach():
self._cleanup()
def maybe_register_dataset_for_temp_dir_deletion(dataset):
"""
This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order
to properly delete them before deleting the temporary directory.
The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
"""
if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
return
global _DATASETS_WITH_TABLE_IN_TEMP_DIR
if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:
_DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()
if any(
Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents
for cache_file in dataset.cache_files
):
_DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
def get_datasets_with_cache_file_in_temp_dir():
return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
def set_caching_enabled(boolean: bool):
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows reloading an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
the ``download_mode`` parameter in :func:`datasets.load_dataset`.
"""
global _CACHING_ENABLED
_CACHING_ENABLED = bool(boolean)
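# --- Added usage sketch: the flag is global module state, so these helpers act
# as a simple on/off toggle around transforms whose results should not be cached.
#
# set_caching_enabled(False)
# assert not is_caching_enabled()   # transforms now write to a temporary dir
# ... run non-cacheable .map()/.filter() calls here ...
# set_caching_enabled(True)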
def is_caching_enabled() -> bool:
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows reloading an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
the ``download_mode`` parameter in :func:`datasets.load_dataset`.
"""
global _CACHING_ENABLED
return bool(_CACHING_ENABLED)
def get_temporary_cache_files_directory() -> str:
"""Return a directory that is deleted when session closes."""
global _TEMP_DIR_FOR_TEMP_CACHE_FILES
if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
# Avoids a PermissionError on Windows caused by the datasets referencing
# the files from the cache directory on clean-up
def cleanup_func():
for dset in get_datasets_with_cache_file_in_temp_dir():
dset.__del__()
_TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempDirWithCustomCleanup(cleanup_func=cleanup_func)
return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
#################
# Hashing
#################
def hashregister(*types):
def proxy(func):
for t in types:
Hasher.dispatch[t] = func
return func
return proxy
class Hasher:
"""Hasher that accepts python objects as inputs."""
dispatch: Dict = {}
def __init__(self):
self.m = xxhash.xxh64()
@classmethod
def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
value = [value] if isinstance(value, bytes) else value
m = xxhash.xxh64()
for x in value:
m.update(x)
return m.hexdigest()
@classmethod
def hash_default(cls, value: Any) -> str:
return cls.hash_bytes(dumps(value))
@classmethod
def hash(cls, value: Any) -> str:
if type(value) in cls.dispatch:
return cls.dispatch[type(value)](cls, value)
else:
return cls.hash_default(value)
def update(self, value: Any) -> None:
header_for_update = f"=={type(value)}=="
value_for_update = self.hash(value)
self.m.update(header_for_update.encode("utf8"))
self.m.update(value_for_update.encode("utf-8"))
def hexdigest(self) -> str:
return self.m.hexdigest()
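# --- Added sketch (not in the original file): Hasher is deterministic for
# picklable/dill-serializable values, which is what lets fingerprints (and thus
# cache file names) be reproduced across sessions.
def _hasher_determinism_example():
    h1, h2 = Hasher(), Hasher()
    for h in (h1, h2):
        h.update("train")
        h.update({"batch_size": 32, "shuffle": True})
    assert h1.hexdigest() == h2.hexdigest()
    assert Hasher.hash("a") != Hasher.hash("b")
    return h1.hexdigest()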
# Register a new hasher can be useful for two possible reasons:
# 1 - optimize the hashing of large amount of data (e.g. pa.Table)
# 2 - take advantage of a custom serialization method (e.g. DatasetInfo)
@hashregister(pa.Table, Table, InMemoryTable, MemoryMappedTable, ConcatenationTable)
def _hash_pa_table(hasher, value):
def _hash_pa_array(value):
if isinstance(value, pa.ChunkedArray):
return hasher.hash_bytes(c.to_string().encode("utf-8") for c in value.chunks)
else:
return hasher.hash_bytes(value.to_string().encode("utf-8"))
value = "-".join(col + "-" + _hash_pa_array(value[col]) for col in sorted(value.column_names))
return hasher.hash_bytes(value.encode("utf-8"))
@hashregister(DatasetInfo)
def _hash_dataset_info(hasher, value):
return hasher.hash_bytes(json.dumps(asdict(value), sort_keys=True).encode("utf-8"))
#################
# Fingerprinting
#################
# we show a warning only once when fingerprinting fails to avoid spam
fingerprint_warnings: Dict[str, bool] = {}
def generate_fingerprint(dataset) -> str:
state = dataset.__dict__
hasher = Hasher()
for key in sorted(state):
if key == "_fingerprint":
continue
hasher.update(key)
hasher.update(state[key])
# hash data files last modification timestamps as well
for cache_file in dataset.cache_files:
hasher.update(os.path.getmtime(cache_file["filename"]))
return hasher.hexdigest()
def generate_random_fingerprint(nbits=64) -> str:
return f"{random.getrandbits(nbits):0{nbits//4}x}"
def update_fingerprint(fingerprint, transform, transform_args):
global fingerprint_warnings
hasher = Hasher()
hasher.update(fingerprint)
try:
hasher.update(transform)
except: # noqa various errors might raise here from pickle or dill
if _CACHING_ENABLED:
if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
logger.warning(
f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
"This warning is only showed once. Subsequent hashing failures won't be showed."
)
fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
else:
logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
else:
logger.info(
f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
)
return generate_random_fingerprint()
for key in sorted(transform_args):
hasher.update(key)
try:
hasher.update(transform_args[key])
except: # noqa various errors might raise here from pickle or dill
if _CACHING_ENABLED:
if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
logger.warning(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
"This warning is only showed once. Subsequent hashing failures won't be showed."
)
fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
else:
logger.info(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
)
else:
logger.info(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
)
return generate_random_fingerprint()
return hasher.hexdigest()
def fingerprint_transform(
inplace: bool,
use_kwargs: Optional[List[str]] = None,
ignore_kwargs: Optional[List[str]] = None,
fingerprint_names: Optional[List[str]] = None,
randomized_function: bool = False,
version: Optional[str] = None,
):
"""
Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``
Args:
inplace (``bool``): If inplace is True, the fingerprint of the dataset is updated inplace.
Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of
setting the fingerprint of the returned Dataset.
use_kwargs (Optional ``List[str]``): optional white list of argument names to take into account
to update the fingerprint to the wrapped method that should take care of
setting the fingerprint of the returned Dataset. By default all the arguments are used.
ignore_kwargs (Optional ``List[str]``): optional black list of argument names to take into account
to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs.
fingerprint_names (Optional ``List[str]``, defaults to ["new_fingerprint"]):
If the dataset transform is not inplace and returns a DatasetDict, then it can require
several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,
one fingerprint named after each element of fingerprint_names is going to be passed.
randomized_function (``bool``, defaults to False): If the dataset transform is random and has
optional parameters "seed" and "generator", then you can set randomized_function to True.
This way, even if users set "seed" and "generator" to None, then the fingerprint is
going to be randomly generated depending on numpy's current state. In this case, the
generator is set to np.random.default_rng(np.random.get_state()[1][0]).
version (Optional ``str``): version of the transform. The version is taken into account when
computing the fingerprint. If a dataset transform changes (or at least if the output data
that are cached changes), then one should increase the version. If the version stays the
same, then old cached data could be reused that are not compatible with the new transform.
It should be in the format "MAJOR.MINOR.PATCH".
"""
assert use_kwargs is None or isinstance(use_kwargs, list), "use_kwargs is supposed to be a list, not {}".format(
type(use_kwargs)
)
assert ignore_kwargs is None or isinstance(
ignore_kwargs, list
), "ignore_kwargs is supposed to be a list, not {}".format(type(use_kwargs))
assert not inplace or not fingerprint_names, "fingerprint_names are only used when inplace is False"
fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
def _fingerprint(func):
assert inplace or all( # check that not in-place functions require fingerprint parameters
name in func.__code__.co_varnames for name in fingerprint_names
), "function {} is missing parameters {} in signature".format(func, fingerprint_names)
if randomized_function: # randomized function have seed and generator parameters
assert "seed" in func.__code__.co_varnames, "'seed' must be in {}'s signature".format(func)
assert "generator" in func.__code__.co_varnames, "'generator' must be in {}'s signature".format(func)
# this has to be outside the wrapper or since __qualname__ changes in multiprocessing
transform = f"{func.__module__}.{func.__qualname__}"
if version is not None:
transform += f"@{version}"
@wraps(func)
def wrapper(*args, **kwargs):
kwargs_for_fingerprint = kwargs.copy()
if args:
params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD]
self: "Dataset" = args[0]
args = args[1:]
params = params[1:]
kwargs_for_fingerprint.update(zip(params, args))
else:
self: "Dataset" = kwargs.pop("self")
# keep the right kwargs to be hashed to generate the fingerprint
if use_kwargs:
kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
if ignore_kwargs:
kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
if randomized_function: # randomized functions have `seed` and `generator` parameters
if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
kwargs_for_fingerprint["generator"] = np.random.default_rng(np.random.get_state()[1][0])
# remove kwargs that are the default values
default_values = {
p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty
}
for default_varname, default_value in default_values.items():
if (
default_varname in kwargs_for_fingerprint
and kwargs_for_fingerprint[default_varname] == default_value
):
kwargs_for_fingerprint.pop(default_varname)
# compute new_fingerprint and add it to the args of not in-place transforms
if inplace:
new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
else:
for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
if kwargs.get(fingerprint_name) is None:
kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
kwargs[fingerprint_name] = update_fingerprint(
self._fingerprint, transform, kwargs_for_fingerprint
)
# Call actual function
out = func(self, *args, **kwargs)
# Update fingerprint of in-place transforms + update in-place history of transforms
if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
self._fingerprint = new_fingerprint
return out
wrapper._decorator_name_ = "fingerprint"
return wrapper
return _fingerprint
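# --- Added sketch (not in the original file): the decorator's core bookkeeping
# can be reproduced by hand with update_fingerprint. Identical
# (fingerprint, transform, args) triples always yield the same new fingerprint,
# which is what makes cache files reusable.
def _fingerprint_chain_example():
    fp = "0123456789abcdef"  # stand-in for a dataset's current fingerprint
    fp_a = update_fingerprint(fp, "Dataset.map", {"batched": True, "fn": "lower"})
    fp_b = update_fingerprint(fp, "Dataset.map", {"batched": True, "fn": "lower"})
    fp_c = update_fingerprint(fp, "Dataset.map", {"batched": True, "fn": "upper"})
    assert fp_a == fp_b and fp_a != fp_c
    return fp_a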
|