max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
main.py | hermanxcxr/ong_projects_recommender | 1 | 12789551 | from extraction import Extraction
from transform import Transform
from db_search import DbSearch
if __name__ == '__main__':
    # Prompt (Spanish): "Search for project matches (y/n)"
    task = input('Buscar coincidencias de proyectos (y/n): ')
if task == 'y':
country = input('Country: ')
funding = input('Funding: ')
keywords = input('Keywords: ')
db_search = DbSearch(country,funding,keywords)
db_search.db_searcher()
    # Note: entering '0' (rather than 'n') runs the extraction/transform pipeline.
    elif task == '0':
extraction = Extraction(
'../../../selenium_driver/chromedriver96.exe',
'https://www.grants.gov/web/grants/search-grants.html',
'https://www.grants.gov/view-opportunity.html?oppId='
)
extraction.extractor()
transform = Transform()
transform.db_transformer() | 2.875 | 3 |
tests/component/test_one_component.py | mwtoews/pymt | 38 | 12789552 | import os
from pytest import approx
from pymt.component.component import Component
from pymt.framework.services import del_component_instances
def test_no_events(with_no_components):
del_component_instances(["AirPort"])
comp = Component("AirPort", uses=[], provides=[], events=[])
comp.go()
assert comp._port.current_time == approx(100.0)
def test_from_string(with_no_components):
del_component_instances(["air_port"])
contents = """
name: air_port
class: AirPort
"""
comp = Component.from_string(contents)
comp.go()
assert comp._port.current_time == 100.0
def test_print_events(tmpdir, with_no_components):
del_component_instances(["earth_port"])
contents = """
name: earth_port
class: EarthPort
print:
- name: earth_surface__temperature
interval: 0.1
format: nc
- name: earth_surface__density
interval: 20.
format: netcdf
- name: glacier_top_surface__slope
interval: 0.3
format: nc
"""
with tmpdir.as_cwd():
comp = Component.from_string(contents)
comp.go()
assert comp._port.current_time == 100.0
assert os.path.isfile("earth_surface__temperature.nc")
assert os.path.isfile("glacier_top_surface__slope.nc")
assert os.path.isfile("earth_surface__density.nc")
def test_rerun(with_no_components):
del_component_instances(["AirPort"])
comp = Component("AirPort", uses=[], provides=[], events=[])
comp.go()
assert comp._port.current_time == 100.0
comp.go()
assert comp._port.current_time == 100.0
def test_rerun_with_print(tmpdir, with_no_components):
del_component_instances(["earth_port"])
contents = """
name: earth_port
class: EarthPort
print:
- name: earth_surface__temperature
interval: 20
format: netcdf
"""
with tmpdir.as_cwd():
comp = Component.from_string(contents)
comp.go()
assert comp._port.current_time == approx(100.0)
assert os.path.isfile("earth_surface__temperature.nc")
# os.remove("earth_surface__temperature.nc")
del_component_instances(["earth_port"])
comp = Component.from_string(contents)
comp.go()
assert comp._port.current_time == approx(100.0)
assert os.path.isfile("earth_surface__temperature.nc")
| 2.125 | 2 |
gdc_filtration_tools/tools/create_oxog_intervals.py | MarcSaric/variant-filtration-tool | 0 | 12789553 | """Takes an input VCF file and converts it to an interval list
for use by the Broad OxoG metrics tool. This assumes that the
input VCF contains only SNPs.
@author: <NAME> <<EMAIL>>
"""
import pysam
from gdc_filtration_tools.logger import Logger
def create_oxog_intervals(input_vcf: str, output_file: str) -> None:
"""
Takes a SNP-only VCF file and creates an interval list for
use by the Broad oxog metrics tool.
:param input_vcf: The input SNP-only VCF file to extract intervals from.
:param output_file: The output interval list to create.
"""
logger = Logger.get_logger("create_oxog_intervals")
logger.info("Extracts interval-file for Broad OxoG metrics from VCF.")
logger.warning("Expects a SNP-Only VCF!!")
# setup
total = 0
# Vcf reader
reader = pysam.VariantFile(input_vcf)
# Process
try:
with open(output_file, "wt") as o:
for record in reader.fetch():
total += 1
row = "{0}:{1}".format(record.contig, record.pos)
o.write(row + "\n")
finally:
reader.close()
logger.info("Processed {} records".format(total))
| 3.078125 | 3 |
nobos_torch_lib/configs/training_configs/training_config_base.py | noboevbo/nobos_torch_lib | 2 | 12789554 | <reponame>noboevbo/nobos_torch_lib<gh_stars>1-10
import os
from nobos_commons.utils.file_helper import get_create_path
from nobos_torch_lib.learning_rate_schedulers.learning_rate_scheduler_base import LearningRateSchedulerBase
from nobos_torch_lib.learning_rate_schedulers.learning_rate_scheduler_dummy import LearningRateSchedulerDummy
class TrainingConfigBase(object):
def __init__(self, model_name: str, model_dir: str):
self.model_name: str = model_name
self.model_dir: str = get_create_path(model_dir)
self.num_epochs = 150
self.checkpoint_epoch: int = 50
# Optimizer
self.learning_rate: float = 0.01
self.momentum: float = 0.9
self.weight_decay: float = 5e-4
# LR Scheduler
self.learning_rate_scheduler: LearningRateSchedulerBase = LearningRateSchedulerDummy()
def get_output_path(self, epoch: int):
return os.path.join(self.model_dir, "{}_cp{}.pth".format(self.model_name, str(epoch).zfill(4)))
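
# Illustrative only: a minimal sketch of how this config might be used; "example_model"
# and "./models" are hypothetical values (the constructor presumably creates model_dir
# on disk via get_create_path).
if __name__ == "__main__":
    cfg = TrainingConfigBase(model_name="example_model", model_dir="./models")
    print(cfg.get_output_path(epoch=7))  # -> ./models/example_model_cp0007.pth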
| 2.109375 | 2 |
app/library/technologies/bundle.py | imamsolikhin/Python | 0 | 12789555 | # -*- coding: utf-8 -*-
"""The Bundle module defines a few Ethernet specific network element classes: BundleInterface"""
# local modules
import pynt.elements
import pynt.layers
import pynt.xmlns
# ns and layers variables and GetCreateWellKnownAdaptationFunction() functions are always present in the pynt.technologies.* files.
prefix = "bundle"
uri = 'http://www.science.uva.nl/research/sne/ndl/bundle#'
schemaurl = 'http://www.science.uva.nl/research/sne/schema/bundle.rdf'
humanurl = 'http://www.science.uva.nl/research/sne/ndl/?c=20-Technology-Schemas'
def GetNamespace():
global prefix, uri, schemaurl, humanurl
return pynt.xmlns.GetCreateNamespace(
prefix = prefix,
uri = uri,
schemaurl = schemaurl,
humanurl = humanurl,
layerschema = True,
)
def GetLayer(shortcut):
if shortcut == 'bundle':
return pynt.layers.GetCreateLayer('BundleNetworkElement', namespace=GetNamespace(), name="Bundle")
else:
raise AttributeError("Unknown layer '%s'" % shortcut)
def GetCreateWellKnownAdaptationFunction(name):
global uri
raise AttributeError("Adaptation Function '%s' unknown in namespace %s" % (name, uri))
# pynt.elements.GetCreateInterfaceLayer("BundleInterface", namespace=GetNamespace(), layer=GetLayer('bundle'))
class BundleInterface(pynt.elements.Interface):
"""Bundle Interface: A duct"""
def __init__(self, *args, **params):
pynt.elements.Interface.__init__(self, *args, **params)
self.layer = GetLayer('bundle')
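
# Illustrative sketch (assumes the pynt stack used above is importable): look up the
# namespace and layer that this module registers.
if __name__ == "__main__":
    ns = GetNamespace()
    layer = GetLayer('bundle')
    print(ns, layer)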
| 2.6875 | 3 |
examples/on_improvement.py | roshanrahman/miraiml | 0 | 12789556 | <filename>examples/on_improvement.py
from time import sleep
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score
from miraiml import HyperSearchSpace, Config, Engine
# Let's use a single Naive Bayes classifier for this example.
hyper_search_spaces = [HyperSearchSpace(model_class=GaussianNB, id='Gaussian NB')]
config = Config(
local_dir='miraiml_local_on_improvement',
problem_type='classification',
hyper_search_spaces=hyper_search_spaces,
score_function=roc_auc_score
)
# Simply printing the best score on improvement. This function must receive a
# dictionary, which is the return of the request_status method.
def on_improvement(status):
print('Scores:', status['scores'])
# Instantiating the engine
engine = Engine(config, on_improvement=on_improvement)
# Loading data
data = pd.read_csv('pulsar_stars.csv')
train_data, test_data = train_test_split(data, stratify=data['target_class'],
test_size=0.2, random_state=0)
engine.load_data(train_data, 'target_class', test_data)
# Starting the engine
engine.restart()
# Let's watch the engine print the best score for 10 seconds
sleep(10)
engine.interrupt()
| 3.140625 | 3 |
includes/remote.py | b1scuit-thi3f/jimiPlugin-remote | 3 | 12789557 | <reponame>b1scuit-thi3f/jimiPlugin-remote
class remote():
    def command(self, command, args=[], elevate=False, runAs=None, timeout=None):
        self.error = "Not implemented"
        return (-2555, "Not implemented", "Not implemented")

    def reboot(self, timeout):
        # Not implemented yet!
        self.error = "Not implemented"
        return False

    def upload(self, localFile, remotePath):
        # Not supported!
        self.error = "Not supported"
        return False

    def download(self, remoteFile, localPath, createMissingFolders):
        # Not supported!
        self.error = "Not supported"
        return False
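
# A hedged sketch (not part of the original plugin) of how a concrete backend might
# subclass remote(); EchoRemote and its behaviour are purely illustrative.
class EchoRemote(remote):
    def command(self, command, args=[], elevate=False, runAs=None, timeout=None):
        # Pretend the command succeeded and echo it back instead of executing it.
        return (0, "echo: {0} {1}".format(command, " ".join(args)), "")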
| 2.328125 | 2 |
tools/test_gan_ui.py | ChenFengYe/relightable-nr | 0 | 12789558 | <gh_stars>0
from flask import Flask
from flask import request, jsonify
from flask import send_file
from io import BytesIO
from PIL import Image
from test_gan import build_model
import numpy as np
import json
import torch
import cv2
import math
import time
class Args():
def __init__(self, cfg, opts):
self.cfg = cfg
self.opts = opts
# fashion video
model_path = '/home/chenxin/relightable-nr/data/200909_fashion_small/logs/09-10_03-35-40_ganhd_mask/200909_GANHD_Contextual_mask.yaml'
# model_path = '/home/chenxin/relightable-nr/data/200909_fashion_small/logs/09-10_06-18-35_ganhd_mask/200909_GANHD_Contextual_mask.yaml'
# trump
# model_path = '/new_disk/chenxin/relightable-nr/data/200906_trump/logs/09-06_11-04-21_test_8_trump_from_internet/200903_GAN_APose.yaml'
# # sport short male running
# model_path = '/new_disk/chenxin/relightable-nr/data/200903_justin/logs/09-03_16-02-04_cam_9views/200903_GAN_APose.yaml'
# # shirt female Apose
# model_path = '/new_disk/chenxin/relightable-nr/data/200830_hnrd_SDAP_14442478293/logs/09-02_16-34-18_cam_9views/200830_GAN_APose.yaml'
args = Args(model_path, ['WORKERS','0', 'TEST.BATCH_SIZE','1'])
# args = Args('/new_disk/chenxin/relightable-nr/data/200830_hnrd_SDAP_30714418105/logs/08-31_07-21-59_NoAtt_linear/200830_GAN_APose.yaml',
# ['WORKERS','0', 'TEST.BATCH_SIZE','1'])
app = Flask(__name__)
def rodrigues_rotation_matrix(axis, theta):
axis = np.asarray(axis)
theta = np.asarray(theta)
axis = axis/math.sqrt(np.dot(axis, axis))
a = math.cos(theta/2.0)
b, c, d = -axis*math.sin(theta/2.0)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
[2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
[2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
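
# Quick sanity check (illustrative, left as comments so it does not run on import):
# rotating the x-axis by +90 degrees about the z-axis should give roughly the y-axis.
#   R = rodrigues_rotation_matrix([0, 0, 1], math.pi / 2)
#   R.dot(np.array([1.0, 0.0, 0.0]))  # ~ array([0., 1., 0.])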
def serve_pil_image(pil_img):
img_io = BytesIO()
pil_img.save(img_io, 'JPEG', quality=80)
img_io.seek(0)
return send_file(img_io, mimetype='image/jpeg')
# inference loading for ui
def prepare_camera_transform(obj, Ts):
if Ts is not None:
Ts = np.linalg.inv(Ts)
Ts = torch.from_numpy(Ts.astype(np.float32))
in_points = obj['v_attr']['v']
center = torch.mean(in_points,dim=0).cpu()
# up = -torch.mean(Ts[:,0:3,0],dim =0)
up = torch.tensor([0.,1.,0.])
# up = -torch.tensor([0.,0.,1.])
# up = torch.tensor([0.,0.,1.])
# up = torch.tensor([0.,1.,0.]) # dome camera
# up = -Ts[0:3,0]
up = up / torch.norm(up)
num_points = torch.Tensor([in_points.size(0)])
in_points = in_points.cuda()
radius = torch.norm(Ts[0:3,3] - center) *1.0
# center = center + up*radius*0.35
center = center
v = torch.randn(3)
v = v - up.dot(v)*up
v = v / torch.norm(v)
s_pos = center + v * radius
# s_pos = Ts[0,0:3,3]
center = center.numpy()
up = up.numpy()
radius = radius.item()
s_pos = s_pos.numpy()
global global_pos
global xaxis
global_pos = s_pos
lookat = center - global_pos
dis=np.linalg.norm(lookat)/100
lookat = lookat/np.linalg.norm(lookat)
xaxis = np.cross(lookat, up)
xaxis = xaxis / np.linalg.norm(xaxis)
cam_data = {}
global global_center
global_center = center
cam_data['center'] = global_center
cam_data['up'] = up
cam_data['dis'] = dis
return cam_data
def control_cam(data):
op=data['op']
global control_speed
global is_move
global is_rotate
if op[0] == 9:
is_rotate = not is_rotate
elif op[0] == 10:
control_speed = control_speed+1
elif op[0] == 11:
control_speed = control_speed-1
elif op[0] == 12:
tmp = is_move
is_move = not tmp
if is_rotate:
op[0] = 1
return data
def calculate_cam_pose(data, cam_data):
global control_speed
global global_pos
global global_center
global xaxis
# global_center = cam_data['center']
up = cam_data['up']
dis = cam_data['dis']*2**control_speed
# calculate cam
op=data['op']
angle = 3.1415926*2/360.0*(2**control_speed)
global_pos = global_pos - global_center
global is_move
if not is_move:
if op[0]==1:
print('LeftLook')
global_pos = rodrigues_rotation_matrix(up,-angle).dot(global_pos)
elif op[0]==2:
print('RightLook')
global_pos = rodrigues_rotation_matrix(up,angle).dot(global_pos)
elif op[0]==3:
print('UpLook')
global_pos = rodrigues_rotation_matrix(xaxis,-angle).dot(global_pos)
elif op[0]==4:
print('DownLook')
global_pos = rodrigues_rotation_matrix(xaxis,angle).dot(global_pos)
else:
move_step = 0.05
if op[0]==1:
print('LeftLook')
global_center = global_center + move_step*xaxis
elif op[0]==2:
print('RightLook')
global_center = global_center - move_step*xaxis
elif op[0]==3:
print('UpLook')
global_center = global_center - move_step*up
elif op[0]==4:
print('DownLook')
global_center = global_center + move_step*up
if op[0]==5:
print('ZoomIn')
global_pos = global_pos-dis*global_pos/np.linalg.norm(global_pos)
elif op[0]==6:
print('ZoomOut')
global_pos = global_pos+dis*global_pos/np.linalg.norm(global_pos)
global_pos = global_pos + global_center
lookat = global_center - global_pos
lookat = lookat/np.linalg.norm(lookat)
# yaxis = -np.cross(lookat, up)
# yaxis = yaxis / np.linalg.norm(yaxis)
# xaxis = np.cross(yaxis,lookat)
# xaxis = xaxis/np.linalg.norm(xaxis)
xaxis = np.cross(lookat, up)
xaxis = xaxis / np.linalg.norm(xaxis)
yaxis = -np.cross(xaxis,lookat)
yaxis = yaxis/np.linalg.norm(yaxis)
nR = np.array([xaxis,yaxis,lookat, global_pos]).T
nR = np.concatenate([nR,np.array([[0,0,0,1]])])
T = torch.Tensor(nR)
# world2cam
T = np.linalg.inv(T.numpy())
T = torch.Tensor(T).cuda()
return T
def calculate_frame(data, frame_id, frame_range):
frame_id = frame_id.item()
op=data['op']
frame_len = len(frame_range)
idx = frame_range.index(frame_id)
    if op[0]==7:
        idx = (idx-1)%frame_len
        print('previous frame')
    elif op[0]==8:
        idx = (idx+1)%frame_len
        print('next frame')
cur_frame_id = frame_range[idx]
return torch.tensor([cur_frame_id])
# load model
model, view_dataloader, view_dataset, save_dict = build_model(args)
# prepare cam
global global_view_data
global_view_data = next(iter(view_dataloader))
obj = view_dataset.objs[global_view_data['f_idx'].item()]
Ts = view_dataset.poses_all[0]
Ts = np.dot(Ts, view_dataset.global_RT_inv)
cam_data = prepare_camera_transform(obj, Ts)
# prepare interaction
global is_move
is_move = False
global is_rotate
is_rotate = False
global control_speed
control_speed = 0.0
global rotate_count
rotate_count = 0
@app.route('/', methods = ["GET","POST"])
def hello_world():
t_start_all = time.time()
t_start = time.time()
# recevice data
data = request.get_data()
data = json.loads(data)
# generate view
# view_data = view_dataset.__getitem__(0)
# T = view_data['pose'][0,...]
data = control_cam(data)
T = calculate_cam_pose(data, cam_data)
global global_view_data
global_view_data['f_idx'] = calculate_frame(data, global_view_data['f_idx'], view_dataset.frame_range)
global_view_data = view_dataset.read_view_from_cam(global_view_data, T)
# build calib
global is_rotate
global rotate_count
if is_rotate:
view_dataset.calib['poses'][rotate_count, ...] = global_view_data['pose'][0, ...].clone().detach().cpu().numpy()
view_dataset.calib['projs'][rotate_count, ...] = global_view_data['proj'][0, ...].clone().detach().cpu().numpy()
view_dataset.calib['dist_coeffs'][rotate_count, ...] = global_view_data['dist_coeff'][0, ...].clone().detach().cpu().numpy()
rotate_count += 1
if rotate_count == 360:
import scipy
scipy.io.savemat('/home/chenxin/relightable-nr/data/200909_fashion_small/calib/calib_0911_rendered360_fix2.mat', view_dataset.calib)
# inference
model.set_input(global_view_data)
print('load data'+ str(time.time()-t_start) + ' s')
t_start = time.time()
model.test(global_view_data)
print('test data'+ str(time.time()-t_start) + ' s')
t_start = time.time()
outputs = model.get_current_results()
outputs_img = outputs['rs'][:,0:3,:,:]
neural_img = outputs['nimg_rs']
# uv_map = global_view_data['uv_map']
outputs_img = outputs_img.detach().cpu()[0]
outputs_img = cv2.cvtColor(outputs_img.permute(1,2,0).numpy()*255.0, cv2.COLOR_BGR2RGB)
neural_img = neural_img.detach().cpu()[0]
neural_img = cv2.cvtColor(neural_img.permute(1,2,0).numpy()*255.0, cv2.COLOR_BGR2RGB)
# Im = Image.fromarray(outputs_img.astype('uint8')).convert('RGB')
# mask = mask_t.permute(1,2,0).numpy()*255.0
# rgba=img*mask/255.0+(255.0-mask)*bg/255.0
# Im = Image.open("/new_disk/chenxin/relightable-nr/data/200830_hnrd_SDAP_30714418105/img/SDAP_30714418105_80_00000.jpg")
# print(Im)
# mask = np.concatenate([mask,mask,mask],axis=2)
# depth_t = depth.detach().cpu()[0]
# depth_res = depth_t.permute(1,2,0).numpy()*255.0
# depth_res = np.concatenate([depth_res,depth_res,depth_res],axis=2)
Im_res = np.hstack((outputs_img, neural_img))
Im = Image.fromarray(Im_res.astype('uint8')).convert('RGB')
im_data = serve_pil_image(Im)
print('outp data'+ str(time.time()-t_start) + ' s')
print('all time'+ str(time.time()-t_start_all) + ' s')
return im_data
if __name__ == '__main__':
app.run(debug=False, threaded=True, host='0.0.0.0',port=8030)
| 2.015625 | 2 |
proteinsolver/utils/protein_design.py | ostrokach/proteinsolver | 27 | 12789559 | import heapq
from dataclasses import dataclass, field
from typing import Any, Optional, Tuple
import torch
import torch.nn as nn
from torch_geometric.data import Data
def get_node_proba(net, x, edge_index, edge_attr, num_categories=20):
raise Exception("Use get_node_outputs instead!")
def get_node_value(net, x, edge_index, edge_attr, num_categories=20):
raise Exception("Use get_node_outputs instead!")
@torch.no_grad()
def get_node_outputs(
net: nn.Module,
x: torch.Tensor,
edge_index: torch.Tensor,
edge_attr: torch.Tensor,
num_categories: int = 20,
output_transform: Optional[str] = None,
oneshot: bool = False,
) -> torch.Tensor:
"""Return network output for each node in the reference sequence.
Args:
net: The network to use for making predictions.
x: Node attributes for the target sequence.
edge_index: Edge indices of the target sequence.
edge_attr: Edge attributes of the target sequence.
num_categories: The number of categories to which the network assigns individual nodes
(e.g. the number of amino acids for the protein design problem).
output_transform: Transformation to apply to network outputs.
- `None` - No transformation.
- `proba` - Apply the softmax transformation.
- `logproba` - Apply the softmax transformation and log the results.
oneshot: Whether predictions should be made using a single pass through the network,
or incrementally, by making a single prediction at a time.
Returns:
A tensor of network predictions for each node in `x`.
"""
assert output_transform in [None, "proba", "logproba"]
x_ref = x
x = torch.ones_like(x_ref) * num_categories
x_proba = torch.zeros_like(x_ref).to(torch.float)
index_array_ref = torch.arange(x_ref.size(0))
mask = x == num_categories
while mask.any():
output = net(x, edge_index, edge_attr)
if output_transform == "proba":
output = torch.softmax(output, dim=1)
elif output_transform == "logproba":
output = torch.softmax(output, dim=1).log()
output_for_x = output.gather(1, x_ref.view(-1, 1))
if oneshot:
return output_for_x.data.cpu()
output_for_x = output_for_x[mask]
index_array = index_array_ref[mask]
max_proba, max_proba_position = output_for_x.max(dim=0)
assert x[index_array[max_proba_position]] == num_categories
assert x_proba[index_array[max_proba_position]] == 0
correct_amino_acid = x_ref[index_array[max_proba_position]].item()
x[index_array[max_proba_position]] = correct_amino_acid
assert output[index_array[max_proba_position], correct_amino_acid] == max_proba
x_proba[index_array[max_proba_position]] = max_proba
mask = x == num_categories
return x_proba.data.cpu()
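
# Hedged usage sketch: `trained_net` and the toy tensors below are placeholders, not part
# of proteinsolver; shapes follow the function's own expectations
# (x: [num_nodes] integer categories, edge_index: [2, num_edges], edge_attr: [num_edges, feat_dim]).
#   x = torch.randint(0, 20, (7,))
#   edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])
#   edge_attr = torch.randn(3, 2)
#   proba = get_node_outputs(trained_net, x, edge_index, edge_attr, output_transform="proba")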
@torch.no_grad()
def scan_with_mask(
net: nn.Module,
x: torch.Tensor,
edge_index: torch.Tensor,
edge_attr: torch.Tensor,
num_categories: int = 20,
output_transform: Optional[str] = None,
) -> torch.Tensor:
"""Generate an output for each node in the sequence by masking one node at a time."""
assert output_transform in [None, "proba", "logproba"]
x_ref = x
output_for_mask = torch.zeros_like(x_ref).to(torch.float)
for i in range(x_ref.size(0)):
x = x_ref.clone()
x[i] = num_categories
output = net(x, edge_index, edge_attr)
if output_transform == "proba":
output = torch.softmax(output, dim=1)
elif output_transform == "logproba":
output = torch.softmax(output, dim=1).log()
output_for_x = output.gather(1, x_ref.view(-1, 1))
output_for_mask[i] = output_for_x[i]
return output_for_mask.data.cpu()
# === Protein design ===
@torch.no_grad()
def design_sequence(
net: nn.Module,
data: Data,
random_position: bool = False,
value_selection_strategy: str = "map",
num_categories: int = None,
temperature: float = 1.0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Generate new sequences.
Args:
net: A trained neural network to use for designing sequences.
data: The data on which to base new sequences.
random_position: Whether the next position to explore should be selected at random
or by selecting the position for which we have the most confident predictions.
value_selection_strategy: Controls the strategy for generating new sequences:
- "map" - Select the most probable residue each time.
- "multinomial" - Sample residues according to the probability assigned
by the network.
- "ref" - Select the residue provided by the `data.x` reference.
num_categories: The number of categories possible.
If `None`, assume that the number of categories corresponds to the maximum value
in `data.x`.
Returns:
        A tuple of tensors: the designed sequences and their per-position probabilities.
"""
assert value_selection_strategy in ("map", "multinomial", "ref")
if num_categories is None:
num_categories = data.x.max().item()
if hasattr(data, "batch"):
batch_size = data.batch.max().item() + 1
else:
batch_size = 1
x_ref = data.y if hasattr(data, "y") and data.y is not None else data.x
x = torch.ones_like(data.x) * num_categories
x_proba = torch.zeros_like(x).to(torch.float)
# First, gather probabilities for pre-assigned residues
mask_filled = (x_ref != num_categories) & (x == num_categories)
while mask_filled.any():
for (
max_proba_index,
chosen_category,
chosen_category_proba,
) in _select_residue_for_position(
net,
x,
x_ref,
data,
batch_size,
mask_filled,
random_position,
"ref",
temperature=temperature,
):
assert chosen_category != num_categories
assert x[max_proba_index] == num_categories
assert x_proba[max_proba_index] == 0
x[max_proba_index] = chosen_category
x_proba[max_proba_index] = chosen_category_proba
mask_filled = (x_ref != num_categories) & (x == num_categories)
assert (x == x_ref).all().item()
# Next, select residues for unassigned positions
mask_empty = x == num_categories
while mask_empty.any():
for (
max_proba_index,
chosen_category,
chosen_category_proba,
) in _select_residue_for_position(
net,
x,
x_ref,
data,
batch_size,
mask_empty,
random_position,
value_selection_strategy,
temperature=temperature,
):
assert chosen_category != num_categories
assert x[max_proba_index] == num_categories
assert x_proba[max_proba_index] == 0
x[max_proba_index] = chosen_category
x_proba[max_proba_index] = chosen_category_proba
mask_empty = x == num_categories
return x.cpu(), x_proba.cpu()
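
# Hedged usage sketch: `trained_net` and `data` are placeholders (a trained model and a
# torch_geometric Data object with x / edge_index / edge_attr), not values from this repo.
#   designed_x, designed_proba = design_sequence(
#       trained_net, data, value_selection_strategy="multinomial", temperature=1.0
#   )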
def _select_residue_for_position(
net,
x,
x_ref,
data,
batch_size,
mask_ref,
random_position,
value_selection_strategy,
temperature=1.0,
):
"""Predict a new residue for an unassigned position for each batch in `batch_size`."""
assert value_selection_strategy in ("map", "multinomial", "ref")
output = net(x, data.edge_index, data.edge_attr)
output = output / temperature
output_proba_ref = torch.softmax(output, dim=1)
output_proba_max_ref, _ = output_proba_ref.max(dim=1)
index_array_ref = torch.arange(x.size(0))
for i in range(batch_size):
mask = mask_ref
if batch_size > 1:
mask = mask & (data.batch == i)
index_array = index_array_ref[mask]
max_probas = output_proba_max_ref[mask]
if random_position:
selected_residue_subindex = torch.randint(0, max_probas.size(0), (1,)).item()
max_proba_index = index_array[selected_residue_subindex]
else:
selected_residue_subindex = max_probas.argmax().item()
max_proba_index = index_array[selected_residue_subindex]
category_probas = output_proba_ref[max_proba_index]
if value_selection_strategy == "map":
chosen_category_proba, chosen_category = category_probas.max(dim=0)
elif value_selection_strategy == "multinomial":
chosen_category = torch.multinomial(category_probas, 1).item()
chosen_category_proba = category_probas[chosen_category]
elif value_selection_strategy == "ref":
chosen_category = x_ref[max_proba_index]
chosen_category_proba = category_probas[chosen_category]
yield max_proba_index, chosen_category, chosen_category_proba
# ASTAR approach
@torch.no_grad()
def get_descendents(net, x, x_proba, edge_index, edge_attr, cutoff):
index_array = torch.arange(x.size(0))
mask = x == 20
output = net(x, edge_index, edge_attr)
output = torch.softmax(output, dim=1)
output = output[mask]
index_array = index_array[mask]
max_proba, max_index = output.max(dim=1)[0].max(dim=0)
row_with_max_proba = output[max_index]
sum_log_prob = x_proba.sum()
assert sum_log_prob.item() <= 0, x_proba
# p_cutoff = min(torch.exp(sum_log_prob), row_with_max_proba.max()).item()
children = []
for i, p in enumerate(row_with_max_proba):
# if p < p_cutoff:
# continue
x_clone = x.clone()
x_proba_clone = x_proba.clone()
assert x_clone[index_array[max_index]] == 20
assert x_proba_clone[index_array[max_index]] == cutoff
x_clone[index_array[max_index]] = i
x_proba_clone[index_array[max_index]] = torch.log(p)
children.append((x_clone, x_proba_clone))
return children
@dataclass(order=True)
class PrioritizedItem:
p: float
x: Any = field(compare=False)
x_proba: float = field(compare=False)
@torch.no_grad()
def design_protein(net, x, edge_index, edge_attr, results, cutoff):
"""Design protein sequences using a search strategy."""
x_proba = torch.ones_like(x).to(torch.float) * cutoff
heap = [PrioritizedItem(0, x, x_proba)]
i = 0
while heap:
item = heapq.heappop(heap)
if i % 1000 == 0:
print(
f"i: {i}; p: {item.p:.4f}; num missing: {(item.x == 20).sum()}; "
f"heap size: {len(heap):7d}; results size: {len(results)}"
)
if not (item.x == 20).any():
results.append(item)
else:
children = get_descendents(net, item.x, item.x_proba, edge_index, edge_attr, cutoff)
for x, x_proba in children:
heapq.heappush(heap, PrioritizedItem(-x_proba.sum(), x, x_proba))
i += 1
if len(heap) > 1_000_000:
heap = heap[:700_000]
heapq.heapify(heap)
return results
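
# Hedged sketch of a possible invocation (placeholder inputs): `x` would typically be a
# tensor filled with the placeholder category 20 (unknown residue), and `cutoff` is the
# sentinel initial log-probability assigned to every position.
#   results = design_protein(trained_net, x, edge_index, edge_attr, results=[], cutoff=-10.0)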
| 2.546875 | 3 |
fastmri_recon/evaluate/scripts/dealiasing_eval.py | samiulshuvo/fastmri-reproducible-benchmark | 105 | 12789560 | <gh_stars>100-1000
import os
from tqdm import tqdm
from fastmri_recon.config import *
from fastmri_recon.data.datasets.fastmri_pyfunc import train_masked_kspace_dataset_from_indexable as singlecoil_dataset
from fastmri_recon.evaluate.metrics.np_metrics import METRIC_FUNCS, Metrics
from fastmri_recon.models.subclassed_models.denoisers.proposed_params import build_model_from_specs
from fastmri_recon.models.subclassed_models.multiscale_complex import MultiscaleComplex
def evaluate_xpdnet_dealiasing(
model_fun,
model_kwargs,
run_id,
n_scales=0,
n_epochs=200,
contrast='CORPD_FBK',
af=4,
n_samples=None,
cuda_visible_devices='0123',
):
val_path = f'{FASTMRI_DATA_DIR}singlecoil_val/'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(cuda_visible_devices)
val_set = singlecoil_dataset(
val_path,
AF=af,
contrast=contrast,
inner_slices=None,
rand=False,
scale_factor=1e6,
)
if n_samples is not None:
val_set = val_set.take(n_samples)
else:
val_set = val_set.take(199)
model = MultiscaleComplex(
model_fun=model_fun,
model_kwargs=model_kwargs,
res=False,
n_scales=n_scales,
fastmri_format=True,
)
model(next(iter(val_set))[0])
model.load_weights(f'{CHECKPOINTS_DIR}checkpoints/{run_id}-{n_epochs:02d}.hdf5')
m = Metrics(METRIC_FUNCS)
for x, y_true in tqdm(val_set.as_numpy_iterator(), total=199 if n_samples is None else n_samples):
y_pred = model.predict(x, batch_size=1)
m.push(y_true[..., 0], y_pred[..., 0])
return ['PSNR', 'SSIM'], list(m.means().values())
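
# Hedged usage sketch: the call pattern below is an assumption, not taken from the repo;
# `model_fun`/`model_kwargs` would come from proposed_params and `run_id` from a prior
# training run whose checkpoints exist under CHECKPOINTS_DIR.
#   metric_names, metric_values = evaluate_xpdnet_dealiasing(
#       model_fun, model_kwargs, run_id='xpdnet_dealiasing_example', n_samples=10,
#   )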
| 1.75 | 2 |
test.py | JackWetherell/self-driving-car-behavioral-cloning | 0 | 12789561 | import pickle
import numpy as np
import sklearn as skl
import tensorflow as tf
import matplotlib.pyplot as plt
# Set up GPU
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
# Testing hyperparameters
DATA_DIR = 'data' # pre-processed data path
# Load the pre-processed data to pickle files
x_train = pickle.load(open(DATA_DIR + '/x_train.p', 'rb'))
y_train = pickle.load(open(DATA_DIR + '/y_train.p', 'rb'))
x_test = pickle.load(open(DATA_DIR + '/x_test.p', 'rb'))
y_test = pickle.load(open(DATA_DIR + '/y_test.p', 'rb'))
print('x_train.shape = {0}, x_test.shape = {1}'.format(x_train.shape, x_test.shape))
print('y_train.shape = {0}, y_test.shape = {1}'.format(y_train.shape, y_test.shape))
# load the model
model = tf.keras.models.load_model('models/model.h5')
# print the model
print(model.summary())
# Evaluate the model
model.evaluate(x_test, y_test, verbose=2)
| 2.53125 | 3 |
ee/clickhouse/queries/funnels/test/test_funnel_correlation_persons.py | csmatar/posthog | 0 | 12789562 | from uuid import uuid4
from ee.clickhouse.models.event import create_event
from ee.clickhouse.queries.funnels.funnel_correlation_persons import FunnelCorrelationPersons
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.constants import INSIGHT_FUNNELS
from posthog.models import Cohort, Filter
from posthog.models.person import Person
from posthog.test.base import APIBaseTest, test_with_materialized_columns
FORMAT_TIME = "%Y-%m-%d 00:00:00"
MAX_STEP_COLUMN = 0
COUNT_COLUMN = 1
PERSON_ID_COLUMN = 2
def _create_person(**kwargs):
person = Person.objects.create(**kwargs)
return Person(id=person.uuid, uuid=person.uuid)
def _create_event(**kwargs):
kwargs.update({"event_uuid": uuid4()})
create_event(**kwargs)
class TestClickhouseFunnelCorrelationPersons(ClickhouseTestMixin, APIBaseTest):
maxDiff = None
def test_basic_funnel_correlation_with_events(self):
filters = {
"events": [
{"id": "user signed up", "type": "events", "order": 0},
{"id": "paid", "type": "events", "order": 1},
],
"insight": INSIGHT_FUNNELS,
"date_from": "2020-01-01",
"date_to": "2020-01-14",
"funnel_correlation_type": "events",
}
filter = Filter(data=filters)
success_target_persons = []
failure_target_persons = []
for i in range(10):
person = _create_person(distinct_ids=[f"user_{i}"], team_id=self.team.pk)
_create_event(
team=self.team, event="user signed up", distinct_id=f"user_{i}", timestamp="2020-01-02T14:00:00Z",
)
if i % 2 == 0:
_create_event(
team=self.team,
event="positively_related",
distinct_id=f"user_{i}",
timestamp="2020-01-03T14:00:00Z",
)
success_target_persons.append(str(person.uuid))
_create_event(
team=self.team, event="paid", distinct_id=f"user_{i}", timestamp="2020-01-04T14:00:00Z",
)
for i in range(10, 20):
person = _create_person(distinct_ids=[f"user_{i}"], team_id=self.team.pk)
_create_event(
team=self.team, event="user signed up", distinct_id=f"user_{i}", timestamp="2020-01-02T14:00:00Z",
)
if i % 2 == 0:
_create_event(
team=self.team,
event="negatively_related",
distinct_id=f"user_{i}",
timestamp="2020-01-03T14:00:00Z",
)
failure_target_persons.append(str(person.uuid))
# One positively_related as failure
person_fail = _create_person(distinct_ids=[f"user_fail"], team_id=self.team.pk)
_create_event(
team=self.team, event="user signed up", distinct_id=f"user_fail", timestamp="2020-01-02T14:00:00Z",
)
_create_event(
team=self.team, event="positively_related", distinct_id=f"user_fail", timestamp="2020-01-03T14:00:00Z",
)
# One negatively_related as success
person_succ = _create_person(distinct_ids=[f"user_succ"], team_id=self.team.pk)
_create_event(
team=self.team, event="user signed up", distinct_id=f"user_succ", timestamp="2020-01-02T14:00:00Z",
)
_create_event(
team=self.team, event="negatively_related", distinct_id=f"user_succ", timestamp="2020-01-03T14:00:00Z",
)
_create_event(
team=self.team, event="paid", distinct_id=f"user_succ", timestamp="2020-01-04T14:00:00Z",
)
# TESTS
# test positively_related successes
filter = filter.with_data(
{
"funnel_correlation_person_entity": {"id": "positively_related", "type": "events"},
"funnel_correlation_person_converted": "TrUe",
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], success_target_persons)
# test negatively_related failures
filter = filter.with_data(
{
"funnel_correlation_person_entity": {"id": "negatively_related", "type": "events"},
"funnel_correlation_person_converted": "falsE",
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], failure_target_persons)
# test positively_related failures
filter = filter.with_data(
{
"funnel_correlation_person_entity": {"id": "positively_related", "type": "events"},
"funnel_correlation_person_converted": "False",
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], [str(person_fail.uuid)])
# test negatively_related successes
filter = filter.with_data(
{
"funnel_correlation_person_entity": {"id": "negatively_related", "type": "events"},
"funnel_correlation_person_converted": "trUE",
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], [str(person_succ.uuid)])
# test all positively_related
filter = filter.with_data(
{
"funnel_correlation_person_entity": {"id": "positively_related", "type": "events"},
"funnel_correlation_person_converted": None,
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], [*success_target_persons, str(person_fail.uuid)])
# test all negatively_related
filter = filter.with_data(
{
"funnel_correlation_person_entity": {"id": "negatively_related", "type": "events"},
"funnel_correlation_person_converted": None,
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], [*failure_target_persons, str(person_succ.uuid)])
def test_people_arent_returned_multiple_times(self):
person = _create_person(distinct_ids=[f"user_1"], team_id=self.team.pk)
_create_event(
team=self.team, event="user signed up", distinct_id=f"user_1", timestamp="2020-01-02T14:00:00Z",
)
_create_event(
team=self.team, event="positively_related", distinct_id=f"user_1", timestamp="2020-01-03T14:00:00Z",
)
# duplicate event
_create_event(
team=self.team, event="positively_related", distinct_id=f"user_1", timestamp="2020-01-03T15:00:00Z",
)
_create_event(
team=self.team, event="paid", distinct_id=f"user_1", timestamp="2020-01-04T14:00:00Z",
)
filter = Filter(
data={
"events": [
{"id": "user signed up", "type": "events", "order": 0},
{"id": "paid", "type": "events", "order": 1},
],
"insight": INSIGHT_FUNNELS,
"date_from": "2020-01-01",
"date_to": "2020-01-14",
"funnel_correlation_type": "events",
"funnel_correlation_person_entity": {"id": "positively_related", "type": "events"},
"funnel_correlation_person_converted": "TrUe",
}
)
results, has_more_results = FunnelCorrelationPersons(filter, self.team).run()
self.assertFalse(has_more_results)
self.assertCountEqual([val["uuid"] for val in results], [str(person.uuid)])
| 2.171875 | 2 |
run.py | AnthraxisBR/kibana-index-alert-tool | 0 | 12789563 | """
Start the application
"""
import os
import getpass
from kibana_index_alert_tool import main
from dotenv import load_dotenv
home = os.path.expanduser("~")
load_dotenv(f'/{home}/.kibana_index_alert_tool')
if __name__ == '__main__':
main()
| 1.351563 | 1 |
transforms_deprecated/randomerasing.py | edificewang/imageclassification | 2 | 12789564 | import math
import random
import numpy as np
class RandomErasing_Tensor(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
img: RGB format,torch.tensor type
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
print(img.shape)
if random.uniform(0, 1) > self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
img: RGB format ;from cv2.imread,just 3 channels
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465])->None:
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img:np.array)->np.array:
if random.uniform(0, 1) > self.probability:
return img
for attempt in range(100):
rows,cols,channels=img.shape
area = rows * cols
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < cols and h < rows:
x1 = random.randint(0, rows - h)
y1 = random.randint(0, cols - w)
if channels == 3:
# img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
# img[x1:x1 + h, y1:y1 + w, 1] = self.mean[1]
# img[x1:x1 + h, y1:y1 + w, 2] = self.mean[2]
img[x1:x1+h,y1:y1+w]=self.mean
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
def RandomErasing_Tensor_test():
import cv2
import numpy as np
import torch
from torchvision import transforms
def toTensor(img):
        assert type(img) == np.ndarray, 'the img type is {}, but ndarray expected'.format(type(img))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = torch.from_numpy(img)
        return img.float().div(255).unsqueeze(0)  # 255 could also be changed to 256
def tensor_to_np(tensor):
img = tensor.mul(255).byte()
img = img.cpu().numpy().squeeze(0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
img=cv2.imread('./1.jpg')
cv2.imwrite('org.tmp.jpg',img)
RE=RandomErasing_Tensor(probability=1)
img_tensor=toTensor(img)
image=RE(img_tensor)
cv2.imwrite('randomearsing.tmp.jpg',tensor_to_np(image))
def RandomErasing_test():
import cv2
import numpy as np
mean=[0,0,0]
img=cv2.imread('./1.jpg')
cv2.imwrite('org.tmp.jpg',img)
RE=RandomErasing(probability=1,mean=mean)
input_img=img[::]
image=RE(input_img)
cv2.imwrite('randomearsing.tmp.jpg',input_img)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--module',type=str,default='numpy')
opt=parser.parse_args()
if opt.module =='torch.tensor':
RandomErasing_Tensor_test()
else:
RandomErasing_test()
| 3.6875 | 4 |
setup.py | Xysss7/data_visualisation_project | 0 | 12789565 | from setuptools import setup
setup(
name="datavisualization",
version="1.2.0",
author="<NAME>",
packages=['datavisualization'],
install_requires=['matplotlib','IPython']
)
| 1.039063 | 1 |
python/12_Auto_ML_AML_Compute.py | rdmueller/TensorFlow101 | 13 | 12789566 | <gh_stars>10-100
import logging
import os
import shutil
import random
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.core.compute import AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
print("Azure ML SDK Version: ", azureml.core.VERSION)
# Initialize Workspace
ws = Workspace.from_config()
print("Resource group: ", ws.resource_group)
print("Location: ", ws.location)
print("Workspace name: ", ws.name)
# Choose a name for the run history container in the workspace.
experiment_name = 'automl-remote-batchai'
experiment = Experiment(ws, experiment_name)
# Create Azure ML Compute cluster (GPU-enabled) as a compute target
compute_target_name = 'myamlcompute'
try:
aml_compute = AmlCompute(workspace=ws, name=compute_target_name)
print('found existing Azure ML Compute cluster:', aml_compute.name)
except ComputeTargetException:
print('creating new Azure ML Compute cluster...')
aml_config = AmlCompute.provisioning_configuration(
vm_size="Standard_NC6",
vm_priority="dedicated",
min_nodes = 0,
max_nodes = 4,
idle_seconds_before_scaledown=300
)
aml_compute = AmlCompute.create(
ws,
name=compute_target_name,
provisioning_configuration=aml_config
)
aml_compute.wait_for_completion(show_output=True)
project_folder = './tmp/automl-remote-batchai'
if not os.path.exists(project_folder):
os.makedirs(project_folder)
shutil.copy('./scripts/get_data.py', project_folder)
print("Training the model...")
# configure Auto ML
automl_config = AutoMLConfig(
task = 'classification',
debug_log = 'automl_errors.log',
primary_metric = 'AUC_weighted',
iteration_timeout_minutes = 2,
iterations = 20,
n_cross_validations = 5,
preprocess = False,
max_concurrent_iterations = 5,
verbosity = logging.INFO,
path = project_folder,
compute_target = aml_compute,
data_script = project_folder + "/get_data.py"
)
remote_run = experiment.submit(automl_config, show_output = False)
remote_run.wait_for_completion(show_output = True)
# Retrieve All Child Runs
print("Retrieving All Child Runs")
children = list(remote_run.get_children())
metricslist = {}
for run in children:
properties = run.get_properties()
metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
metricslist[int(properties['iteration'])] = metrics
rundata = pd.DataFrame(metricslist).sort_index(1)
print(rundata)
# Retrieving the Best Model
print("Retrieving the Best Model")
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)
# Best Model Based on Any Other Metric
print("Retrieving the Best Model based on log_loss")
lookup_metric = "log_loss"
best_run, fitted_model = remote_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)
# Model from a Specific Iteration
print("Retrieving a Model from a Specific Iteration")
iteration = 3
third_run, third_model = remote_run.get_output(iteration=iteration)
print(third_run)
print(third_model)
# Testing the Fitted Model
print("Testing the Fitted Model")
digits = datasets.load_digits()
X_test = digits.data[:10, :]
y_test = digits.target[:10]
images = digits.images[:10]
# Randomly select digits and test.
for index in np.random.choice(len(y_test), 2, replace = False):
print(index)
predicted = fitted_model.predict(X_test[index:index + 1])[0]
label = y_test[index]
title = "Label value = %d Predicted value = %d " % (label, predicted)
fig = plt.figure(1, figsize=(3,3))
ax1 = fig.add_axes((0,0,.8,.8))
ax1.set_title(title)
plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')
plt.show()
| 2.4375 | 2 |
recipes/opencolorio/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12789567 | from conans import ConanFile, CMake, tools
import os
required_conan_version = ">=1.33.0"
class OpenColorIOConan(ConanFile):
name = "opencolorio"
description = "A color management framework for visual effects and animation."
license = "BSD-3-Clause"
homepage = "https://opencolorio.org/"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"use_sse": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"use_sse": True
}
generators = "cmake", "cmake_find_package"
exports_sources = ["CMakeLists.txt", "patches/*"]
topics = ("colors", "visual", "effects", "animation")
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.arch not in ["x86", "x86_64"]:
del self.options.use_sse
def configure(self):
if self.options.shared:
del self.options.fPIC
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def requirements(self):
# TODO: add GLUT (needed for ociodisplay tool)
self.requires("lcms/2.12")
self.requires("yaml-cpp/0.7.0")
if tools.Version(self.version) < "2.1.0":
self.requires("tinyxml/2.6.2")
if tools.Version(self.version) >= "2.1.0":
self.requires("pystring/1.1.3")
self.requires("expat/2.4.1")
self.requires("openexr/2.5.7")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if tools.Version(self.version) >= "2.1.0":
self._cmake.definitions["OCIO_BUILD_PYTHON"] = False
else:
self._cmake.definitions["OCIO_BUILD_SHARED"] = self.options.shared
self._cmake.definitions["OCIO_BUILD_STATIC"] = not self.options.shared
self._cmake.definitions["OCIO_BUILD_PYGLUE"] = False
self._cmake.definitions["USE_EXTERNAL_YAML"] = True
self._cmake.definitions["USE_EXTERNAL_TINYXML"] = True
self._cmake.definitions["USE_EXTERNAL_LCMS"] = True
self._cmake.definitions["OCIO_USE_SSE"] = self.options.get_safe("use_sse", False)
# openexr 2.x provides Half library
self._cmake.definitions["OCIO_USE_OPENEXR_HALF"] = True
self._cmake.definitions["OCIO_BUILD_APPS"] = True
self._cmake.definitions["OCIO_BUILD_DOCS"] = False
self._cmake.definitions["OCIO_BUILD_TESTS"] = False
self._cmake.definitions["OCIO_BUILD_GPU_TESTS"] = False
self._cmake.definitions["OCIO_USE_BOOST_PTR"] = False
# avoid downloading dependencies
self._cmake.definitions["OCIO_INSTALL_EXT_PACKAGE"] = "NONE"
if self.settings.compiler == "Visual Studio" and not self.options.shared:
# define any value because ifndef is used
self._cmake.definitions["OpenColorIO_SKIP_IMPORTS"] = True
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
for module in ("expat", "lcms2", "pystring", "yaml-cpp", "Imath"):
tools.remove_files_by_mask(os.path.join(self._source_subfolder, "share", "cmake", "modules"), "Find"+module+".cmake")
def build(self):
self._patch_sources()
cm = self._configure_cmake()
cm.build()
def package(self):
cm = self._configure_cmake()
cm.install()
if not self.options.shared:
self.copy("*", src=os.path.join(self.package_folder,
"lib", "static"), dst="lib")
tools.rmdir(os.path.join(self.package_folder, "lib", "static"))
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "share"))
# nop for 2.x
tools.remove_files_by_mask(self.package_folder, "OpenColorIOConfig*.cmake")
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.pdb")
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "OpenColorIO"
self.cpp_info.names["cmake_find_package_multi"] = "OpenColorIO"
self.cpp_info.names["pkg_config"] = "OpenColorIO"
self.cpp_info.libs = tools.collect_libs(self)
if tools.Version(self.version) < "2.1.0":
if not self.options.shared:
self.cpp_info.defines.append("OpenColorIO_STATIC")
if self.settings.os == "Macos":
self.cpp_info.frameworks.extend(["Foundation", "IOKit", "ColorSync", "CoreGraphics"])
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines.append("OpenColorIO_SKIP_IMPORTS")
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH env var with: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
| 1.984375 | 2 |
trabajos/utilidades.py | jmjacquet/IronWeb | 0 | 12789568 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.contrib import messages
from django.conf import settings
from django.contrib.messages import constants as message_constants
from django.forms import Widget
from django.utils.safestring import mark_safe
from .models import *
import json
from decimal import *
ESTADO_NP = (
    (0, u'Pendiente'),       # Pending
    (1, u'Anulada'),         # Cancelled
    (2, u'Generó Trabajo'),  # Generated work
)
| 1.5625 | 2 |
code/results/02_fig_01.py | data-intelligence-for-health-lab/delirium_prediction | 0 | 12789569 | <gh_stars>0
# --- loading libraries -------------------------------------------------------
import pandas as pd
import numpy as np
import datetime
# ------------------------------------------------------- loading libraries ---
# --- main routine ------------------------------------------------------------
# Opening ADMISSIONS
df = pd.read_pickle('/project/M-ABeICU176709/ABeICU/data/ADMISSIONS.pickle', compression = 'zip')
# Printing original values
print()
print('original no. of admissions: ', len(df['ADMISSION_ID'].unique()))
print('original no. of patients: ', len(df['PATIENT_ID'].unique()))
print()
#-----------------------------------------------------------------------------
# Filtering admissions with ICU LOS >= 24h
df = df[df['ICU_LOS_24H_FLAG'] == 1].reset_index(drop = True)
# Printing values for LOS >= 24h
print('LOS >= 24h no. of admissions: ', len(df['ADMISSION_ID'].unique()))
print('LOS >= 24h no. of patients: ', len(df['PATIENT_ID'].unique()))
print()
#-----------------------------------------------------------------------------
# Calculating ICU LOS
df['ICU_LOS'] = df.apply(lambda x: (x['ICU_DISCH_DATETIME'] - x['ICU_ADMIT_DATETIME']).days, axis = 1)
# Excluding outliers based on ICU LOS (top 2th percentile = '>30 days')
df = df[df['ICU_LOS'] < df['ICU_LOS'].quantile(0.98)].reset_index(drop = True)
# Printing values for LOS < 30d
print('LOS < 30d no. of admissions: ', len(df['ADMISSION_ID'].unique()))
print('LOS < 30d no. of patients: ', len(df['PATIENT_ID'].unique()))
print()
#-----------------------------------------------------------------------------
df = pd.read_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/master_input.pickle',
compression = 'zip')
# Printing values after merging all data sources
print('no. of admissions after merging: ', len(df['ADMISSION_ID'].unique()))
print('no. of patients after merging: ', len(df['PATIENT_ID'].unique()))
print('no. of records after opening in periods: ', len(df))
print()
#-----------------------------------------------------------------------------
df = df[df['period'] >= 1].reset_index(drop = True)
# Printing rows after excluding first 24h
print('no. of admissions after excluding first 24h records: ', len(df['ADMISSION_ID'].unique()))
print('no. of patients after excluding first 24h records: ', len(df['PATIENT_ID'].unique()))
print('no. of records after excluding first 24h records: ', len(df))
print()
#-----------------------------------------------------------------------------
df.dropna(subset = ['delirium_12h', 'delirium_24h'], inplace = True)
# Printing rows after excluding records w/o delirium_12h and delirium_24h
print('no. of admissions after excluding delirium NaNs (records): ', len(df['ADMISSION_ID'].unique()))
print('no. of patients after excluding delirium NaNs (records): ', len(df['PATIENT_ID'].unique()))
print('no. of records after excluding delirium NaNs (records): ', len(df))
print()
#-----------------------------------------------------------------------------
FILES=[
'/project/M-ABeICU176709/delirium/data/inputs/master/master_train.pickle',
'/project/M-ABeICU176709/delirium/data/inputs/master/master_validation.pickle',
'/project/M-ABeICU176709/delirium/data/inputs/master/master_calibration.pickle',
'/project/M-ABeICU176709/delirium/data/inputs/master/master_test.pickle'
]
for FILE in FILES:
df = pd.read_pickle(FILE, compression='zip')
print(FILE)
print('no. of admissions: ', len(df['ADMISSION_ID'].unique()))
print('no. of patients: ', len(df['PATIENT_ID'].unique()))
print('no. of records: ', len(df))
print('no. records w/ delirium_12h: ', df['delirium_12h'].sum())
print('no. records w/ delirium_24h: ', df['delirium_24h'].sum())
print()
| 2.953125 | 3 |
roblox/utilities/url.py | Warhawk947/ro.py | 1 | 12789570 | """
This module contains functions and objects used internally by ro.py to generate URLs.
"""
root_site = "roblox.com"
cdn_site = "rbxcdn.com"
class URLGenerator:
"""
Generates URLs based on a chosen base URL.
Attributes:
base_url: The base URL
"""
def __init__(self, base_url: str):
self.base_url = base_url
def get_subdomain(self, subdomain: str, protocol: str = "https") -> str:
"""
Returns the full URL of a subdomain, given the base subdomain name.
Arguments:
subdomain: subdomain for the website
protocol: protocol used
"""
return f"{protocol}://{subdomain}.{self.base_url}"
def get_url(
self,
subdomain: str,
path: str = "",
base_url: str = None,
protocol: str = "https",
) -> str:
"""
        Returns a full URL, given a subdomain name, protocol, and path.
Arguments:
subdomain: subdomain for the website
protocol: protocol used
path: path to the data
base_url: base url
"""
if base_url is None:
base_url = self.base_url
return f"{protocol}://{subdomain}.{base_url}/{path}"
| 3.578125 | 4 |
python/lib/lib_care/measure/get_tips_local.py | timtyree/bgmc | 0 | 12789571 | from ..my_initialization import *
# @njit
def compute_all_spiral_tips(t,img,dimgdt,level1,level2,width=200,height=200,theta_threshold=0.):
    # compute all spiral tips present
    # note: t, img, dimgdt, width and height are now explicit arguments (previously they
    # were resolved from the enclosing scope); theta_threshold=0. follows the commented
    # example usage at the bottom of this module.
    retval = find_intersections(img,dimgdt,level1,level2,theta_threshold=theta_threshold)
    # level2=V_threshold
    # retval = find_intersections(img1,img2,level1,level2,theta_threshold=theta_threshold)
    lst_values_x,lst_values_y,lst_values_theta, lst_values_grad_ux, lst_values_grad_uy, lst_values_grad_vx, lst_values_grad_vy = retval
    return format_spiral_tips(t,img,dimgdt,lst_values_x,lst_values_y,lst_values_theta, lst_values_grad_ux,
                              lst_values_grad_uy, lst_values_grad_vx, lst_values_grad_vy,width=width,height=height)

# @njit
def format_spiral_tips(t,img,dimgdt,lst_values_x,lst_values_y,lst_values_theta, lst_values_grad_ux, lst_values_grad_uy, lst_values_grad_vx, lst_values_grad_vy,width=200,height=200):
    x_values = np.array(lst_values_x)
    y_values = np.array(lst_values_y)
    # EP states given by bilinear interpolation with periodic boundary conditions
    v_lst = interpolate_img(x_values,y_values,width,height,img=img)
    dvdt_lst = interpolate_img(x_values,y_values,width,height,img=dimgdt)
    n_tips = x_values.size
    dict_out = {
        't': float(t),
        'n': int(n_tips),
        'x': list(lst_values_x),
        'y': list(lst_values_y),
        'theta': list(lst_values_theta),
        'grad_ux': list(lst_values_grad_ux),
        'grad_uy': list(lst_values_grad_uy),
        'grad_vx': list(lst_values_grad_vx),
        'grad_vy': list(lst_values_grad_vy),
        'v':v_lst,
        'dvdt':dvdt_lst,
    }
    return dict_out
# Example Usage
if __name__=="__main__":
pass
# #compute all spiral tips present
# V_threshold=20.#10.#0.#mV
# level1=V_threshold
# # theta_threshold=0.
# level2=0.
#
# #update texture namespace
# inVc,outVc,inmhjdfx,outmhjdfx,dVcdt=unstack_txt(txt)
# # txt=stack_txt(inVc,outVc,inmhjdfx,outmhjdfx,dVcdt)
#
# img=inVc[...,0]
# dimgdt=dVcdt[...,0]
# width=200;height=200
# dict_out=compute_all_spiral_tips(t,img,dimgdt,level1,level2,width=width,height=height)
# dict_out_instantaneous=dict_out
| 2.375 | 2 |
app.py | muarachmann/cgsbot | 4 | 12789572 | <filename>app.py<gh_stars>1-10
from flask import Flask, request, make_response
from slackclient import SlackClient
import json
import os
app = Flask(__name__)
slack_token = os.environ['SLACK_API_TOKEN']
sc = SlackClient(slack_token)
general_channel = "C93RP3CSG"
introductions_channel = "CFQV2533L"
@app.route('/', methods=['GET', 'POST'])
def check():
if request.method == 'GET':
return make_response("These are not the slackbots you're looking for.", 404)
else:
events_data = json.loads(request.data.decode('utf-8'))
if 'challenge' in events_data:
return make_response(
                events_data.get('challenge'), 200, {'Content-Type': 'application/json'}
)
if "event" in events_data:
event_type = events_data["event"]["type"]
if event_type == "app_mention":
text = events_data["event"]["text"].lower()
user_id = events_data["event"]["user"]
if "hi" in text:
notify_slack("Hi <@" + user_id + ">, what can I do for you? Type\n*channel topic* to see topic for this channel\n*gsoc website* to get link to Google Summer of Code website\n*updates* to get GSoC related updates")
elif "channel topic" in text:
notify_slack("This channel is to help aspiring GSoC students get accepted into the program. If you are not a GSoC veteran(i.e you are here to get help on getting into gsoc) introduce yourself ( 2 Names and Profile picture) so we get to know you.")
elif "gsoc website" in text:
notify_slack("Google Summer of Code official website https://summerofcode.withgoogle.com/")
elif "updates" in text:
notify_slack("There isn't any updates for now.")
else:
notify_slack("Hi <@" + user_id + ">, sorry I can't help you with that. Type\n*channel topic* to see topic for this channel\n*gsoc website* to get link to Google Summer of Code website\n*updates* to get GSoC related updates")
return make_response("", 200)
if event_type == "team_join":
user = events_data["event"]["user"]
welcome_template = "Welcome to Cameroon GSoCers Workspace. \nTell us a little bit about yourself, Your Real Names, Location, \nSpecialty/Department, Your Interests, Any specific questions ? \nLearn Netiquette Rules on http://www.albion.com/netiquette/corerules.html \nLearn How To Ask Smart Questions on http://www.catb.org/esr/faqs/smart-questions.html \nRemember to put your Real Names on your profile and a professional profile picture :) \nTake note of the channel topic and ALL pinned posts"
notify_slack("Hi <@" + user + "> ," + welcome_template)
return make_response("", 200)
return make_response("Not implemented.", 404)
def notify_slack(msg):
sc.api_call(
"chat.postMessage",
channel=general_channel,
text=msg
)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
| 3.3125 | 3 |
ngboost/api.py | wakamezake/ngboost | 0 | 12789573 | <filename>ngboost/api.py
import numpy as np
from ngboost.ngboost import NGBoost
from ngboost.distns import Bernoulli, Normal, LogNormal
from ngboost.scores import MLE
from ngboost.learners import default_tree_learner
from sklearn.base import BaseEstimator
class NGBRegressor(NGBoost, BaseEstimator):
def __init__(self,
Dist=Normal,
Score=MLE,
Base=default_tree_learner,
natural_gradient=True,
n_estimators=500,
learning_rate=0.01,
minibatch_frac=1.0,
verbose=True,
verbose_eval=100,
tol=1e-4):
assert Dist.problem_type == "regression"
super().__init__(Dist, Score, Base, natural_gradient, n_estimators, learning_rate,
minibatch_frac, verbose, verbose_eval, tol)
def dist_to_prediction(self, dist): # predictions for regression are typically conditional means
return dist.mean()
class NGBClassifier(NGBoost, BaseEstimator):
def __init__(self,
Dist=Bernoulli,
Score=MLE,
Base=default_tree_learner,
natural_gradient=True,
n_estimators=500,
learning_rate=0.01,
minibatch_frac=1.0,
verbose=True,
verbose_eval=100,
tol=1e-4):
assert Dist.problem_type == "classification"
super().__init__(Dist, Score, Base, natural_gradient, n_estimators, learning_rate,
minibatch_frac, verbose, verbose_eval, tol)
def predict_proba(self, X, max_iter=None):
return self.pred_dist(X, max_iter=max_iter).to_prob()
def staged_predict_proba(self, X, max_iter=None):
return [dist.to_prob() for dist in self.staged_pred_dist(X, max_iter=max_iter)]
def dist_to_prediction(self, dist): # returns class assignments
return np.argmax(dist.to_prob(), 1)
class NGBSurvival(NGBoost, BaseEstimator):
def __init__(self,
Dist=LogNormal,
Score=MLE,
Base=default_tree_learner,
natural_gradient=True,
n_estimators=500,
learning_rate=0.01,
minibatch_frac=1.0,
verbose=True,
verbose_eval=100,
tol=1e-4):
assert Dist.problem_type == "survival"
super().__init__(Dist, Score, Base, natural_gradient, n_estimators, learning_rate,
minibatch_frac, verbose, verbose_eval, tol)
def dist_to_prediction(self, dist): # predictions for regression are typically conditional means
return dist.mean()
| 2.828125 | 3 |
xml.py | KirillDmit/xmljson | 0 | 12789574 | import xml.etree.ElementTree as ET
from urllib.request import urlopen
import json
data = urlopen('https://lenta.ru/rss').read().decode('utf8')
root = ET.fromstring(data)
items = root.find('channel').findall('item')
result = []
for item in items:
for tag in ['pubDate', 'title']:
result.append({tag: item.find(tag).text})
with open('news.json', 'w', encoding='utf8') as f:
json.dump(result, f)
| 2.78125 | 3 |
python/pythonstartup.py | andrewpsp/dotfiles | 249 | 12789575 | def _init():
import atexit
import os
import sys
try:
import readline
except Exception:
readline = None
import types
import time
import uuid
import json
import pprint
import hashlib
import subprocess
import datetime
try:
import __builtin__
except ImportError:
import builtins as __builtin__
PY2 = sys.version_info[0] == 2
__import__('rlcompleter')
histdir = os.path.expanduser('~/.pyhist')
try:
os.makedirs(histdir)
except OSError:
pass
if PY2:
text_type = unicode
else:
text_type = str
def _b(x):
if not isinstance(x, bytes):
x = x.encode('utf-8')
return x
histfile = os.path.join(histdir, hashlib.sha1(
os.path.normpath(_b(os.path.abspath(sys.prefix)))).hexdigest())
if readline is not None:
try:
readline.read_history_file(histfile)
except IOError:
pass
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind '\t' rl_complete")
else:
readline.parse_and_bind("tab: complete")
atexit.register(readline.write_history_file, histfile)
def _magic_uuid(val=None):
if val is None:
return uuid.uuid4()
elif isinstance(val, uuid.UUID):
return val
elif len(val) == 16:
return uuid.UUID(bytes=val)
return uuid.UUID(val)
def _dump_json(x, as_string=False, indent=2, cp=False):
s = '\n'.join(x.rstrip() for x in json.dumps(x, indent=indent).rstrip().splitlines())
if cp:
_copy(s)
if as_string:
return s
print(s)
def _cat(path):
with open(path, 'rb') as f:
return f.read()
def _tcat(path):
return _cat(path).decode('utf-8')
def _paste():
return subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE).communicate()[0]
def _tpaste():
return _paste().decode('utf-8')
def _jpaste():
return json.loads(_paste())
def _copy(val):
if isinstance(val, text_type):
val = val.encode('utf-8')
return subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE).communicate(val)
def _jcopy(val, indent=None):
_copy(_dump_json(val, indent=indent, as_string=True))
helpers = types.ModuleType('helpers')
helpers.histfile = histfile
helpers.pp = pprint.pprint
helpers.uuid = _magic_uuid
helpers.UUID = uuid.UUID
helpers.uuid3 = uuid.uuid3
helpers.uuid4 = uuid.uuid4
helpers.uuid5 = uuid.uuid5
helpers.dt = datetime.datetime
helpers.datetime = datetime.datetime
helpers.td = datetime.timedelta
helpers.timedelta = datetime.timedelta
helpers.time = time.time
helpers.j = _dump_json
helpers.cat = _cat
helpers.tcat = _tcat
helpers.cp = _copy
helpers.jcp = _jcopy
helpers.copy = _copy
helpers.jcopy = _jcopy
helpers.paste = _paste
helpers.tpaste = _tpaste
helpers.jpaste = _jpaste
__builtin__.h = helpers
__builtin__.true = True
__builtin__.false = False
__builtin__.null = None
_init()
del _init
| 2.125 | 2 |
tests/test_set.py | mike0sv/pyjackson | 20 | 12789576 | <gh_stars>10-100
from typing import Set
from pyjackson import serialize
from pyjackson.core import Comparable
from pyjackson.decorators import make_string
from tests.conftest import serde_and_compare
@make_string
class AClass(Comparable):
def __init__(self, value: str):
self.value = value
def __hash__(self):
return hash(self.value)
def test_set():
value = {AClass('a'), AClass('b')}
serde_and_compare(value, Set[AClass])
assert serialize(value, Set[AClass]) in [[{'value': 'a'}, {'value': 'b'}],
[{'value': 'b'}, {'value': 'a'}]]
def test_set_hint():
@make_string
class CClass(Comparable):
def __init__(self, value: Set[str]):
self.value = value
value = CClass({'a', 'b'})
serde_and_compare(value)
assert serialize(value) in [
{'value': ['a', 'b']},
{'value': ['b', 'a']}
]
| 2.828125 | 3 |
chad_finance/api/urls.py | StfnC/chad-finance | 0 | 12789577 | from django.urls import path
from .views import *
urlpatterns = [
# Portfolio urls
path('portfolio/', PortfolioAPIView.as_view()),
path('portfolio/data/', PortfolioChartDataView.as_view()),
# Trade urls
path('trade/<int:pk>/', TradeRetrieveAPIView.as_view()),
path('trade/', TradeCreateAPIView.as_view()),
path('trade/all/', TradeListAPIView.as_view()),
# User urls
path('user/delete/', DeleteAccountView.as_view()),
# Autres
path('search/', SearchSymbolView.as_view()),
path('symbol/', SymbolInfoView.as_view()),
]
| 1.796875 | 2 |
settings.py | joeltg/nsfw | 1 | 12789578 | <filename>settings.py
# Set this to the path to Caffe installation on your system
caffe_root = "/usr/local/caffe/python"
gpu = False
# -------------------------------------
# These settings should work by default
# DNN being visualized
# These two settings are default, and can be overriden in the act_max.py
net_weights = "nets/caffenet/bvlc_reference_caffenet.caffemodel"
net_definition = "nets/caffenet/caffenet.prototxt"
# Generator DNN
generator_weights = "nets/upconv/fc6/generator.caffemodel"
generator_definition = "nets/upconv/fc6/generator.prototxt"
# Encoder DNN
encoder_weights = "nets/caffenet/bvlc_reference_caffenet.caffemodel"
encoder_definition = "nets/caffenet/caffenet.prototxt"
| 1.890625 | 2 |
SOSS/trace/calibrate_tracepol.py | njcuk9999/jwst-mtl | 1 | 12789579 | <reponame>njcuk9999/jwst-mtl<gh_stars>1-10
import numpy as np
from astropy.io import fits
import matplotlib.pylab as plt
from scipy import interpolate
from scipy.optimize import least_squares
import sys
sys.path.insert(0, '/genesis/jwst/github/jwst-mtl/')
import SOSS.trace.tracepol as tp
from SOSS.dms.soss_centroids import get_soss_centroids
def get_cv3_tracepars(order=1, debug=False):
"""
Return the polynomial fit of the spectral pixel --- wavelength
relation, based on the few observed LED sources at Cryo Vacuum
Campaign (CV3).
:param order:
:param debug:
:return: Set of polynomial coefficients for the spectral pixels
versus the wavelength (in microns).
"""
if order == 1:
# Measured positions of the laser light sources at CV3
# (measured in the native pixel coordinates).
spatpix = 256 - np.array([210, 218, 218, 190])
specpix = 2048 - np.array([239, 499, 745, 1625])
w = np.array([1.06262, 1.30838, 1.54676, 2.410]) # microns
# Fit the specpix vs wavelength
param_spec = np.polyfit(w, specpix, 2)
if debug:
wfit = np.linspace(0.8, 2.9, 200)
xfit = np.polyval(param_spec, wfit)
plt.figure(figsize=(10, 5))
plt.scatter(w, specpix)
plt.plot(wfit, xfit)
plt.show()
if order == 2:
# Measured positions of the laser light sources at CV3
# (measured in the native pixel coordinates).
spatpix = 256 - np.array([60, 161, 161])
specpix = 2048 - np.array([395, 1308, 1823])
w = np.array([0.6412, 1.06262, 1.30838]) # microns
# Fit the specpix vs wavelength
param_spec = np.polyfit(w, specpix, 2)
if debug:
wfit = np.linspace(0.6, 1.4, 200)
xfit = np.polyval(param_spec, wfit)
plt.figure(figsize=(10, 5))
plt.scatter(w, specpix)
plt.plot(wfit, xfit)
plt.show()
if order == 3:
# Measured positions of the laser light sources at CV3
# (measured in the native pixel coordinates).
# WARNING - only one LED was actually observed:
# lambda = 0.6412 at position 256-30 and 2048-1040.
# Once we had the optics model calibrated, we used
# it to determine that the trace covers from
# 0.60378 to 0.95575 microns between specpix=1 and
# specpix=1137. The slope is therefore:
# d(lambda)/dx = -3.095602463e-4
dldx = -3.095602463e-4
# We can anchor from the one LED point
xintercept = (2048-1040) - 0.6412/dldx
# Fit the specpix vs wavelength, order 1
param_spec = np.array([1.0/dldx, xintercept])
        if debug:
            # Only one LED point was measured for order 3 (see comments above):
            # lambda = 0.6412 microns at native specpix = 2048 - 1040.
            w = np.array([0.6412])
            specpix = np.array([2048 - 1040])
            wfit = np.linspace(0.6, 0.9, 200)
            xfit = np.polyval(param_spec, wfit)
            plt.figure(figsize=(10, 5))
            plt.scatter(w, specpix)
            plt.plot(wfit, xfit)
            plt.show()
return param_spec
def cv3_wavelength_to_specpix(wavelength=None, order=1):
"""
Return the spectral pixel positions for the input wavelengths
supplied, based on the CV3 solution.
:param order: 1, 2, or 3
:param wavelength: in microns, the wavelength array for which
positions are requested.
:return:
"""
# Get the CV3 specpix vs wavelength trace fit parameters as well
# as the spatpix vs. specpix fit parameters.
param_spec = get_cv3_tracepars(order=order)
# wfit and xfit are for displaying the fit
# Compute the spectral pixel and spatial pixel positions based on
# the input wavelength.
spectralpixel = np.polyval(param_spec, wavelength)
return spectralpixel
def cv3_specpix_to_wavelength(specpix, order=1):
# Get the CV3 specpix vs wavelength trace fit parameters
param_spec = get_cv3_tracepars(order=order)
# Generate a dense realization
w = np.linspace(0.5, 3.0, 5001)
x = np.polyval(param_spec, w)
# Fit the other way around (w vs x)
param = np.polyfit(x, w, 11)
wavelength = np.polyval(param, specpix)
return wavelength
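
# Consistency sketch (illustrative, not part of the original script): for order 1
# the two helpers above should approximately invert one another, since both are
# built from the same CV3 polynomial fit.
#
#     x = np.linspace(100., 1900., 5)
#     w = cv3_specpix_to_wavelength(x, order=1)
#     x_back = cv3_wavelength_to_specpix(wavelength=w, order=1)
#     print(np.abs(x_back - x).max())   # ideally a small residual set by the two fits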
def apply_calibration(param, x, y):
"""
The rotation+offset transformation when one wants to apply the
best fit parameters.
:param param: A length=3 array: angle, origin in x, origin in y
:param x: x position of input
:param y: y position of input
:return: x_rot, y_rot: x,y positions after rotation
"""
theta = param[0]
x0 = param[1]
y0 = param[2]
angle = np.deg2rad(theta)
dx, dy = x - x0, y - y0
x_rot = np.cos(angle) * dx - np.sin(angle) * dy + x0
y_rot = np.sin(angle) * dx + np.cos(angle) * dy + y0
return x_rot, y_rot
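
# Tiny worked example (illustrative): with zero offsets, apply_calibration([90., 0., 0.], 10., 0.)
# rotates the point (10, 0) by 90 degrees about the origin and returns roughly (0., 10.),
# i.e. a pure counter-clockwise rotation of the model coordinates.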
def calibrate_tracepol(both_orders=True, debug=False):
"""
Calibrate the tracepol default rotation+offsets based on the CV3
deep stack and the get_soss_centroid function.
:return:
"""
# Optics model reference file
optmodel = '/genesis/jwst/jwst-ref-soss/trace_model/NIRISS_GR700_trace_extended.csv'
# Read the CV3 deep stack and bad pixel mask
bad = fits.getdata('/genesis/jwst/userland-soss/loic_review/badpix_DMS.fits')
im = fits.getdata('/genesis/jwst/userland-soss/loic_review/stack_256_ng3_DMS.fits')
# im is the dataframe, bad the bad pixel map
badpix = np.zeros_like(bad, dtype='bool')
badpix[~np.isfinite(bad)] = True
# Interpolate bad pixels
im = interpolate_badpixels(im, badpix)
# Measure the trace centroid position for the deep stack image.
centroids = get_soss_centroids(im, subarray='SUBSTRIP256', apex_order1=None,
mask=badpix, verbose=False)
x_o1 = centroids['order 1']['X centroid']
y_o1 = centroids['order 1']['Y centroid']
w_o1 = centroids['order 1']['trace widths']
pars_o1 = centroids['order 1']['poly coefs']
x_o2 = centroids['order 2']['X centroid']
y_o2 = centroids['order 2']['Y centroid']
w_o2 = centroids['order 2']['trace widths']
pars_o2 = centroids['order 2']['poly coefs']
x_o3 = centroids['order 3']['X centroid']
y_o3 = centroids['order 3']['Y centroid']
w_o3 = centroids['order 3']['trace widths']
pars_o3 = centroids['order 3']['poly coefs']
# Wavelengths at which the measured traces and the
# optics model traces are going to be compared for the fit.
wavelength_o1 = np.linspace(0.9, 2.8, 50)
wavelength_o2 = np.linspace(0.6, 1.4, 50)
wavelength_o3 = np.linspace(0.6, 0.95, 50)
# Calibrate in wavelength the measured traces and make a
# realization of positions at a few selected wavelengths.
# ORDER 1
w_o1 = cv3_specpix_to_wavelength(x_o1, order=1)
# Generate a transformation wavelength --> specpix
# based on the measured x positions and calibrated wavelengths
f_w2x = interpolate.interp1d(w_o1, x_o1)
# Apply it for the few selected wavelengths for later fit
x_obs_o1 = f_w2x(wavelength_o1)
# Generate a transformation wavelength --> spatpix
# based on the measured y positions and calibrated wavelengths
f_w2y = interpolate.interp1d(w_o1, y_o1)
# Apply the wavelength --> spatpix relation to a few points
y_obs_o1 = f_w2y(wavelength_o1)
# ORDER 2
w_o2 = cv3_specpix_to_wavelength(x_o2, order=2)
# Generate a transformation wavelength --> specpix
# based on the measured x positions and calibrated wavelengths
f_w2x = interpolate.interp1d(w_o2, x_o2)
# Apply it for the few selected wavelengths for later fit
x_obs_o2 = f_w2x(wavelength_o2)
# Generate a transformation wavelength --> spatpix
# based on the measured y positions and calibrated wavelengths
f_w2y = interpolate.interp1d(w_o2, y_o2)
# Apply the wavelength --> spatpix relation to a few points
y_obs_o2 = f_w2y(wavelength_o2)
# ORDER 3
w_o3 = cv3_specpix_to_wavelength(x_o3, order=3)
# Generate a transformation wavelength --> specpix
f_w2x = interpolate.interp1d(w_o3, x_o3)
# Apply it for the few selected wavelengths for later fit
x_obs_o3 = f_w2x(wavelength_o3)
# Generate a transformation wavelength --> spatpix
# based on the measured y positions and calibrated wavelengths
f_w2y = interpolate.interp1d(w_o3, y_o3)
# Apply the wavelength --> spatpix relation to a few points
y_obs_o3 = f_w2y(wavelength_o3)
# For order 3, what does the model say about the wavelength
# calibration x_fit_o3 vs w_fit_o3? Oh! But these 3 arrays
# are badly calibrated. Do from the model instead.
# print('Order 3 x, y, wavelength')
# for i in range(np.size(w_o3)):
# print(x_o3[i], y_o3[i], w_o3[i])
# Call tracepol's optics model then compute rotation offsets by
# minimizing deviations to either only order 1 or all orders
# simultaneously.
# Call tracepol, disabling the default rotation, back to original
# Optics Model. x/y_mod_N are realization of the model at a few
# wavelengths.
param = tp.get_tracepars(filename=optmodel, disable_rotation=True)
x_mod_o1, y_mod_o1, mask_mod_o1 = tp.wavelength_to_pix(wavelength_o1,
param, m=1,
frame='dms',
subarray='SUBSTRIP256',
oversample=1)
x_mod_o2, y_mod_o2, mask_mod_o2 = tp.wavelength_to_pix(wavelength_o2,
param, m=2,
frame='dms',
subarray='SUBSTRIP256',
oversample=1)
x_mod_o3, y_mod_o3, mask_mod_o3 = tp.wavelength_to_pix(wavelength_o3,
param, m=3,
frame='dms',
subarray='SUBSTRIP256',
oversample=1)
if debug:
# Check if it all makes sense
plt.figure(figsize=(10, 3))
plt.scatter(x_mod_o1, y_mod_o1)
plt.scatter(x_mod_o2, y_mod_o2)
plt.scatter(x_mod_o3, y_mod_o3)
plt.scatter(x_obs_o1, y_obs_o1)
plt.scatter(x_obs_o2, y_obs_o2)
plt.scatter(x_obs_o3, y_obs_o3)
plt.show()
plt.figure(figsize=(10, 10))
plt.scatter(wavelength_o1, x_obs_o1)
plt.scatter(wavelength_o1, x_mod_o1)
plt.show()
# What orders should be used for fitting for rotation?
if both_orders:
# Package the Orders 1 and 2 model points and observation points
print('Fitting orders 1 and 2 in obtaining best rotation')
x_mod = np.concatenate((x_mod_o1, x_mod_o2), axis=None)
y_mod = np.concatenate((y_mod_o1, y_mod_o2), axis=None)
x_obs = np.concatenate((x_obs_o1, x_obs_o2), axis=None)
y_obs = np.concatenate((y_obs_o1, y_obs_o2), axis=None)
xy_obs = np.array([x_obs, y_obs])
else:
# Package the Orders 1 ONLY model points and observation points
print('Fitting only first order in obtaining best rotation')
x_mod = np.copy(x_mod_o1)
y_mod = np.copy(y_mod_o1)
x_obs = np.copy(x_obs_o1)
y_obs = np.copy(y_obs_o1)
xy_obs = np.array([x_obs, y_obs])
# These 2 functions need to be there in the code because they use
# variables declared outside.
def fmodel(param):
# That is the transformation matrix coded to perform the fit
# Note that the x_mod and y_mod are outside variables that
# need to be declared before for this to work.
theta = param[0]
x0 = param[1]
y0 = param[2]
angle = np.deg2rad(theta)
dx, dy = x_mod - x0, y_mod - y0 # x_mod and y_mod are global variable
x_rot = np.cos(angle) * dx - np.sin(angle) * dy + x0
y_rot = np.sin(angle) * dx + np.cos(angle) * dy + y0
return np.array([x_rot, y_rot])
def f2minimize(param):
# Minimize the difference between observations and model points
return (xy_obs - fmodel(param)).flatten()
# Informed guess for origin is the CLEAR sweet spot: in DMS coords: x,y=(2048-100),(256-850)=1948,-596
param_guess = [-1.3868425075, 1577.9020186702, -1109.1909267381]
res2 = least_squares(f2minimize, param_guess, ftol=1e-12)
# bounds=([-np.inf,-np.inf,-np.inf,-0.0001,-0.0001],[np.inf,np.inf,np.inf,0,0])) - no need Do the maths
param_bestfit = res2.x
print('Best fit parameters:', param_bestfit)
if True:
print('cost = {:}'.format(res2.cost))
print('Best fit parameters (in DMS coordinates):')
print('theta = {:15.10f}'.format(res2.x[0]))
print('origin_x = {:15.10f}'.format(res2.x[1]))
print('origin_y = {:15.10f}'.format(res2.x[2]))
# print('offset_x = {:15.10f}'.format(res2.x[3]))
# print('offset_y = {:15.10f}'.format(res2.x[4]))
print()
print('Best fit parameters (in native (ds9) coordinates):')
print('theta = {:15.10f}'.format(-res2.x[0]))
print('origin_x = {:15.10f}'.format(256-res2.x[2]))
print('origin_y = {:15.10f}'.format(2048-res2.x[1]))
# print('offset_x = {:15.10f}'.format(-res2.x[4]))
# print('offset_y = {:15.10f}'.format(-res2.x[3]))
print()
print('Once converted to native (aka ds9) pixel coordinates used by tracepol.py,')
print('this becomes:')
print('get_tracepars(filename=None, origin=np.array([{:}, {:}]),'.format(256-res2.x[2], 2048-res2.x[1]))
print(' angle={:},'.format(-res2.x[0]))
print(' disable_rotation=False):')
# Check that the rotated points overplot the observations
# Convert from dms to ds9 coordinates
x_fit_o1, y_fit_o1 = apply_calibration(param_bestfit, x_mod_o1, y_mod_o1)
x_fit_o2, y_fit_o2 = apply_calibration(param_bestfit, x_mod_o2, y_mod_o2)
x_fit_o3, y_fit_o3 = apply_calibration(param_bestfit, x_mod_o3, y_mod_o3)
if debug:
# Figure to show the positions for all 3 orders
fig = plt.figure(figsize=(8, 8), constrained_layout=True)
layout = """
AAAAB
AAAAB
AAAAB
AAAAB
CCCC.
"""
frame = fig.subplot_mosaic(layout)
# Colors for the 3 orders in the figure
color_o1 = 'navy'
color_o2 = 'orange'
color_o3 = 'red'
# First recalculate model positions for all observed positions
param = tp.get_tracepars(filename=optmodel, disable_rotation=False)
print(param)
x_mod_o1, y_mod_o1, mask_mod_o1 = tp.wavelength_to_pix(w_o1,
param, m=1,
frame='dms',
subarray='SUBSTRIP256',
oversample=1)
x_mod_o2, y_mod_o2, mask_mod_o2 = tp.wavelength_to_pix(w_o2,
param, m=2,
frame='dms',
subarray='SUBSTRIP256',
oversample=1)
x_mod_o3, y_mod_o3, mask_mod_o3 = tp.wavelength_to_pix(w_o3,
param, m=3,
frame='dms',
subarray='SUBSTRIP256',
oversample=1)
# Determine the wavelength boundaries for nice display purposes
wavebounds1, _ = tp.subarray_wavelength_bounds(param, subarray='SUBSTRIP256', m=1,
specpix_offset=0, spatpix_offset=0)
wavebounds2, _ = tp.subarray_wavelength_bounds(param, subarray='SUBSTRIP256', m=2,
specpix_offset=0, spatpix_offset=0)
wavebounds3, _ = tp.subarray_wavelength_bounds(param, subarray='SUBSTRIP256', m=3,
specpix_offset=0, spatpix_offset=0)
indo1 = (w_o1 >= wavebounds1[0]) & (w_o1 <= wavebounds1[1])
indo2 = (w_o2 >= wavebounds2[0]) & (w_o2 <= wavebounds2[1])
indo3 = (w_o3 >= wavebounds3[0]) & (w_o3 <= wavebounds3[1])
frame['A'].set_xlim((0, 2048))
frame['A'].set_ylim((0, 256))
frame['A'].imshow(np.log10(im), vmin=0.7, vmax=3, origin='lower', aspect='auto')
frame['A'].plot(x_o1, y_o1, color=color_o1, label='Order 1 - CV3')
frame['A'].plot(x_mod_o1, y_mod_o1, linestyle='dashed', color=color_o1, label='Order 1 - Model')
# frame['A'].scatter(x_fit_o1, y_fit_o1, color=color_o1, label='Order 1 - Model rotated')
if x_o2 is not None:
frame['A'].plot(x_o2, y_o2, color=color_o2, label='Order 2 - CV3')
frame['A'].plot(x_mod_o2, y_mod_o2, linestyle='dashed', color=color_o2, label='Order 2 - Model')
# frame['A'].scatter(x_fit_o2, y_fit_o2, color=color_o2, label='Order 2 - Model rotated')
if x_o3 is not None:
frame['A'].plot(x_o3[indo3], y_o3[indo3], color=color_o3, label='Order 3 - CV3')
frame['A'].plot(x_mod_o3[indo3], y_mod_o3[indo3], linestyle='dashed', color=color_o3, label='Order 3 - Model')
# frame['A'].scatter(x_fit_o3, y_fit_o3, color=color_o3, label='Order 3 - Model rotated')
frame['A'].xaxis.set_ticks_position('top')
frame['A'].set_xlabel('Detector Column Position')
frame['A'].yaxis.set_label_position('right')
frame['A'].set_ylabel('Detector Row Position (Stretched)')
frame['A'].legend()
# Position residuals on the x axis
# residuals are
resi_x_o1 = x_o1 - x_mod_o1
resi_x_o2 = x_o2 - x_mod_o2
resi_x_o3 = x_o3 - x_mod_o3
resi_y_o1 = y_o1 - y_mod_o1
resi_y_o2 = y_o2 - y_mod_o2
resi_y_o3 = y_o3 - y_mod_o3
frame['C'].plot([0, 2048], [0, 0], linestyle='dashed', color='black')
frame['C'].plot(x_o1[indo1], resi_x_o1[indo1], color=color_o1)
frame['C'].plot(x_o2[indo2], resi_x_o2[indo2], color=color_o2)
frame['C'].plot(x_o3[indo3], resi_x_o3[indo3], color=color_o3)
frame['C'].set_xlim((0, 2048))
frame['C'].set_ylim((-11, 11))
frame['C'].yaxis.set_ticks_position('left')
frame['C'].yaxis.set_label_position('right')
frame['C'].set_ylabel('X Pixel Deviation')
frame['B'].plot([0, 0], [0, 256], linestyle='dashed', color='black')
frame['B'].plot(resi_y_o1[indo1], y_o1[indo1], color=color_o1)
frame['B'].plot(resi_y_o2[indo2], y_o2[indo2], color=color_o2)
frame['B'].plot(resi_y_o3[indo3], y_o3[indo3], color=color_o3)
frame['B'].set_xlim((-5, 5))
frame['B'].set_ylim((0, 256))
frame['B'].yaxis.set_ticks_position('right')
frame['B'].xaxis.set_ticks_position('top')
frame['B'].set_xlabel('Y Pixel Deviation')
plt.tight_layout()
plt.savefig('/genesis/jwst/userland-soss/loic_review/traces_position_CV3_vs_Optics.png')
plt.show()
return
def interpolate_badpixels(image, badpix):
"""
Interpolate the bad pixels
"""
# Work on a copy of the image
dimy, dimx = np.shape(image)
image_corr = np.copy(image)
# Indices of the bad pixels and any NaN pixel
indy, indx = np.where(badpix | (~np.isfinite(image)))
# Determine the coordinates of the 8 pixels around the bad pixel
x0, x1, y0, y1 = indx-1, indx+1, indy-1, indy+1
# Keep those within the image boundaries
indx0, indx1, indy0, indy1 = x0 < 0, x1 > dimx-1, y0 < 0, y1 > dimy-1
x0[indx0], x1[indx1], y0[indy0], y1[indy1] = 0, dimx-1, 0, dimy-1
# Interpolate pixels one by one
for i in range(np.size(indx)):
badval = np.nanmean(image_corr[y0[i]:y1[i], x0[i]:x1[i]])
image_corr[indy[i], indx[i]] = badval
return image_corr
if __name__ == '__main__':
calibrate_tracepol()
| 2.09375 | 2 |
homework/pre-course assignment/fibonacci_with_array.py | coderica/effective_cpp | 0 | 12789580 | <gh_stars>0
### Libraries
import sys
import math
### Function Definition
def version_response(str):
if sys.version_info[0] > 2: #check python version
response = input(str)
else:
response = raw_input(str)
return(response)
def get_user_input():
start_num, seq_size = '', ''
while not a.isdigit() or not 1 <= int(a) <= 10:
a = version_response("Pick a number between 1 and 10: ")
while not b.isdigit() or not 1 <= int(b) <= 20:
b = version_response("Pick a number between 1 and 20: ")
return [int(a), int(b)]
def fibonacci(n):
fib = ((1+math.sqrt(5))**n-(1-math.sqrt(5))**n)/(2**n*math.sqrt(5)) #fibonacci formula
return(round(fib))
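
# Quick check (illustrative): Binet's closed form above reproduces the usual
# sequence, e.g. fibonacci(1) == 1, fibonacci(2) == 1 and fibonacci(10) == 55.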
# Runner Code
sequence = []
user_input = get_user_input()
start_num, seq_size = user_input[0], user_input[1]
for index in range(0, seq_size):
next_num = fibonacci(start_num + index)
sequence.append(next_num)
print(*sequence, sep=' ') #pretty print | 3.921875 | 4 |
planning_system/cli/__init__.py | jehboyes/planning_system | 0 | 12789581 | <filename>planning_system/cli/__init__.py
import sys
import os
from importlib import import_module as imp
import click
import planning_system
import sqlalchemy.ext.declarative # for pyinstaller's benefit
import sqlalchemy.ext.automap # for pyinstaller's benefit
import office365.runtime.auth.user_credential # for pyinstaller's benefit
import office365.sharepoint.client_context
import dominate.tags
from sqlalchemy.sql.expression import true
from planning_system.api.functions import resource_path
from planning_system.version import __version__ as v
class Config(object):
"""
CLI configuration object
Used to pass variables through all CLI commands.
Parameters
----------
verbose : bool, optional
Whether or not to print messages passed to ``verbose_print``, by default False.
echo : bool, optional
        Whether or not to echo SQL passed to the DB, by default False.
environment : str, optional
Section of the config.ini file to use, by default 'PLANNING'.
surpress : bool, optional
Whether or not to surpress confirmation messages, by default False.
debug : bool, optional
Whether or not to perform debug options.
"""
def __init__(self, sys_config, verbose=False, echo=False, surpress=False, debug=False):
self.verbose = verbose
self.echo = echo
self.surpress = surpress
self.sys_config = sys_config
self.d = debug
def __getattr__(self, k):
if k == 'echo':
return self.echo
else:
return getattr(self.sys_config, k)
def verbose_print(self, message, bold=False, **kwargs):
"""
Wraps click echo, for consistent handling of verbose messages.
"""
if self.verbose:
click.secho(message, fg='green', bold=bold, **kwargs)
def confirm(self, message):
"""
Wraps click confirm, skipping confirmation if surpress is True.
"""
if self.surpress:
return true
else:
return click.confirm(message)
def print(self, message):
"""
Used to print important messages in uniform style.
"""
click.secho(message, fg='red', bold=True)
    def debug(self, func):
        """
        Decorator that only executes ``func`` when debug mode is enabled.
        """
        def inner(*args, **kwargs):
            if self.d:
                return func(*args, **kwargs)
        return inner
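
# Usage sketch outside of click (assumption: "PRODUCTION" is a valid section of the
# system configuration, matching the --dbenv default further below):
#
#     sys_config = planning_system.Config("PRODUCTION")
#     cfg = Config(sys_config, verbose=True)
#     cfg.verbose_print("connected")     # printed only because verbose=True
#     cfg.print("important message")     # always printed, bold red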
def add_subcommands(parent, file, package):
"""
Add click subcommands according to directory structure.
Parameters
----------
parent : function
Parent function which has commands added to it.
file : str
Filepath of current file (use __file__).
package : str
Name of the current package (use __package__).
"""
p = os.path.abspath(os.path.dirname(file))
files = os.listdir(p)
this_package = sys.modules[package].__name__
modules = [imp(this_package+"."+f.replace(".py", ""), )
for f in files if f[0] != "_"]
commands = [getattr(module, module.__name__[module.__name__.rfind(".")+1:])
for module in modules
if not getattr(module, 'EXCLUDE_FROM_EXE', False)
or not getattr(sys, 'frozen', False)]
for _ in commands:
try:
parent.add_command(_)
except AttributeError:
raise RuntimeError(f"Problem encountered with {_}")
@click.group()
@click.option("--verbose", "-v", is_flag=True, help="Print more information to the console.")
@click.option("--echo", "-e", is_flag=True, help="Print SQL run against database.")
@click.option("--surpress", "-s", is_flag=True, help="Surpress confirmation alerts.")
@click.option("--debug", is_flag=True, help="Show debug messages.")
@click.option("--dbenv", "-d", type=str, default="PRODUCTION",
help="Specify a DB environment (must correspond to section in config).")
@click.pass_context
def ps(config, verbose, dbenv, echo, surpress, debug):
"""
Entry point for the Planning System command line interface.
"""
# Define config object to be passed to subcommands via click.pass_obj
sys_config = planning_system.Config(dbenv)
config.obj = Config(sys_config, verbose, echo, surpress, debug)
config.obj.verbose_print(f"Running Planning System {v} CLI", True)
add_subcommands(ps, __file__, __package__)
| 2.234375 | 2 |
engine/admin.py | x-risk/x-risk | 5 | 12789582 | <filename>engine/admin.py
from django.contrib import admin
from .models import Topic, Source, SearchString, Search, Publication, Assessment, Profile, AssessmentStatus, HumanPrediction, MLModel, MLPrediction, Log
admin.site.register(Topic)
admin.site.register(Source)
admin.site.register(SearchString)
admin.site.register(Search)
admin.site.register(Publication)
admin.site.register(Assessment)
admin.site.register(Profile)
admin.site.register(AssessmentStatus)
admin.site.register(HumanPrediction)
admin.site.register(MLModel)
admin.site.register(MLPrediction)
admin.site.register(Log)
| 1.492188 | 1 |
modules/ceo/docker/src/ceo/ceo/api/record.py | BuddyVolly/sepal | 0 | 12789583 | import os, logging, json, datetime
from flask import session, request, redirect, url_for, jsonify, render_template, send_file, abort
from flask_cors import cross_origin
from .. import app
from .. import mongo
from ..common.utils import import_sepal_auth, requires_auth, generate_id
from ..common.fusiontables import selectRow, getRowId, updateRow, insertRow, deleteRow, FTException
logger = logging.getLogger(__name__)
PROJECT_TYPE_CEP = 'CEP'
PROJECT_TYPE_TRAINING_DATA = 'TRAINING-DATA'
@app.route('/api/record/<id>', methods=['GET'])
@cross_origin(origins=app.config['CO_ORIGINS'])
@import_sepal_auth
@requires_auth
def recordById(id=None):
record = mongo.db.records.find_one({'id': id}, {'_id': False})
if not record:
abort(404)
return jsonify(record), 200
@app.route('/api/record/project_id/<project_id>', methods=['GET'])
@cross_origin(origins=app.config['CO_ORIGINS'])
@import_sepal_auth
@requires_auth
def recordsByProject(project_id=None):
records = mongo.db.records.find({'project_id': project_id}, {'_id': False})
return jsonify(list(records)), 200
@app.route('/api/record', methods=['POST'])
@cross_origin(origins=app.config['CO_ORIGINS'])
@import_sepal_auth
@requires_auth
def recordAdd():
project_id = request.json.get('project_id')
record_id = request.json.get('record_id')
project = mongo.db.projects.find_one({'id': project_id}, {'_id': False})
if not project:
return 'Not Found!', 404
# security check
if project['username'] != session.get('username') and not session.get('is_admin'):
return 'Forbidden!', 403
# update
username = session.get('username')
plot_id = request.json.get('plot').get('id')
mongo.db.records.update({
'project_id': project_id,
'username': username,
'plot.id': plot_id
}, {
'id': record_id,
'project_id': project_id,
'username': username,
'update_datetime': datetime.datetime.utcnow(),
'value': request.json.get('value'),
'plot': {
'id': plot_id,
'YCoordinate': request.json.get('plot').get('YCoordinate'),
'XCoordinate': request.json.get('plot').get('XCoordinate')
}
}, upsert=True)
# sync
if project['type'] == PROJECT_TYPE_TRAINING_DATA:
syncPlotsWithProject(request.json.get('project_id'))
# fusiontables
token = session.get('accessToken')
fusionTableId = project.get('fusionTableId')
if token and fusionTableId:
data = {
'id': request.json.get('plot').get('id'),
'YCoordinate': request.json.get('plot').get('YCoordinate'),
'XCoordinate': request.json.get('plot').get('XCoordinate')
}
data.update(request.json.get('value'))
location = '%s %s' % (data['YCoordinate'], data['XCoordinate'])
try:
columns = selectRow(token, fusionTableId, data.get('id'))
if columns:
rowId = getRowId(token, fusionTableId, data.get('id'))
if rowId:
updateRow(token, fusionTableId, data, columns, rowId, location=location)
else:
insertRow(token, fusionTableId, data, columns, location=location)
except FTException as e:
pass
return 'OK', 200
@app.route('/api/record/<id>', methods=['PUT'])
@cross_origin(origins=app.config['CO_ORIGINS'])
@import_sepal_auth
@requires_auth
def recordModify(id=None):
record = mongo.db.records.find_one({'id': id}, {'_id': False})
if not record:
return 'Not Found!', 404
# security check
if record['username'] != session.get('username') and not session.get('is_admin'):
return 'Forbidden!', 403
# update the record
record.update({
'value': request.json.get('value'),
'update_datetime': datetime.datetime.utcnow()
})
# update
mongo.db.records.update({'id': id}, {'$set': record}, upsert=False)
#
project = mongo.db.projects.find_one({'id': record.get('project_id')}, {'_id': False})
# sync
if project['type'] == PROJECT_TYPE_TRAINING_DATA:
syncPlotsWithProject(record.get('project_id'))
# fusiontables
token = session.get('accessToken')
fusionTableId = project.get('fusionTableId')
if token and fusionTableId:
data = {
'id': record.get('plot').get('id'),
'YCoordinate': record.get('plot').get('YCoordinate'),
'XCoordinate': record.get('plot').get('XCoordinate'),
}
data.update(request.json.get('value'))
location = '%s %s' % (data['YCoordinate'], data['XCoordinate'])
try:
columns = selectRow(token, fusionTableId, data.get('id'))
if columns:
rowId = getRowId(token, fusionTableId, data.get('id'))
if rowId:
updateRow(token, fusionTableId, data, columns, rowId, location=location)
else:
insertRow(token, fusionTableId, data, columns, location=location)
except FTException as e:
pass
return 'OK', 200
@app.route('/api/record/<id>', methods=['DELETE'])
@cross_origin(origins=app.config['CO_ORIGINS'])
@import_sepal_auth
@requires_auth
def recordDelete(id=None):
record = mongo.db.records.find_one({'id': id}, {'_id': False})
if not record:
return 'Not Found!', 404
# security check
if record['username'] != session.get('username') and not session.get('is_admin'):
return 'Forbidden!', 403
# delete
mongo.db.records.delete_one({'id': id})
#
project = mongo.db.projects.find_one({'id': record.get('project_id')}, {'_id': False})
# sync
if project['type'] == PROJECT_TYPE_TRAINING_DATA:
syncPlotsWithProject(record.get('project_id'))
# fusiontables
token = session.get('accessToken')
fusionTableId = project.get('fusionTableId')
if token and fusionTableId:
try:
rowId = getRowId(token, fusionTableId, record.get('plot').get('id'))
if rowId:
deleteRow(token, fusionTableId, rowId)
except FTException as e:
pass
return 'OK', 200
def syncPlotsWithProject(id):
project = mongo.db.projects.find_one({'id': id})
plots = []
records = mongo.db.records.find({'project_id': id})
for record in records:
plots.append(record['plot'])
project.update({
'plots': plots
})
mongo.db.projects.update({'id': id}, {'$set': project}, upsert=False)
return 'OK', 200
| 2.03125 | 2 |
shortner/models.py | shivagangula/shortme | 0 | 12789584 | from django.db import models
# from django.contrib.auth.models import User
from django.urls import reverse
class UrlDetailes(models.Model):
"""
# user field for authanticate
user = models.OneToOneField(User)
# click_n_time means number of times link clicked
click_n_time = models.IntegerField()
# is_active means url availble nor not for redirect
is_active = models.BooleanField()
"""
original_url = models.CharField(max_length=1000)
shorted_url = models.SlugField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ["-created"]
def __str__(self):
return f"Short Url for: {self.original_url} is {self.shorted_url}"
def object_check(self):
return f"original url { self.original_url }"
| 2.375 | 2 |
scripts/unpivot.py | TshepisoMokoena/covid19africa | 51 | 12789585 | <gh_stars>10-100
import sys
import os
import pandas as pd
import re
import argparse
from utils import *
def parse_args():
cmd_parser = argparse.ArgumentParser()
cmd_parser.add_argument("-i", "--input_path", default="img/",
help="path to the image file to parse data from")
cmd_parser.add_argument("-m", "--mode", default="batch",
help="Whether to batch process many days images or single day image")
cmd_parser.add_argument("-p", "--preprocess", type=str, default="blur",
help="type of preprocessing to clean the image for OCR. If -p is set to None, OCR will be applied to the grayscale image.")
args = vars(cmd_parser.parse_args())
return args
def main():
args = parse_args()
unpivot_timeseries()
if __name__ == "__main__":
main() | 2.890625 | 3 |
src/69.py | cloudzfy/euler | 12 | 12789586 | # Euler's Totient function, phi(n) [sometimes called the phi function],
# is used to determine the number of numbers less than n which are
# relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all
# less than nine and relatively prime to nine, phi(9) = 6.
# n Relatively Prime phi(n) n/phi(n)
# 2 1 1 2
# 3 1,2 2 1.5
# 4 1,3 2 2
# 5 1,2,3,4 4 1.25
# 6 1,5 2 3
# 7 1,2,3,4,5,6 6 1.1666...
# 8 1,3,5,7 4 2
# 9 1,2,4,5,7,8 6 1.5
# 10 1,3,7,9 4 2.5
# It can be seen that n = 6 produces a maximum n / phi(n) for n <= 10.
# Find the value of n <= 1,000,000 for which n / phi(n) is a maximum.
from math import sqrt
def generate_primes(limit):
is_prime = [True for i in range(limit)]
    for i in range(2, int(sqrt(limit)) + 1):  # include sqrt(limit) itself in the sieve
if is_prime[i]:
for j in range(i * i, limit, i):
is_prime[j] = False
return filter(lambda x: is_prime[x], range(2, limit))
primes = generate_primes(1000)
ans = 1
for p in primes:
if ans * p > 1000000:
break
ans *= p
print ans
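
# Why multiplying successive small primes wins (sketch, not from the original
# solution): n/phi(n) equals the product of p/(p-1) over the distinct primes p
# dividing n, so the ratio only depends on which primes divide n and grows with
# every additional small prime factor.  A slow brute-force check of the printed
# answer (Python 2 style, kept as a comment):
#
#   from fractions import gcd
#   phi = lambda n: sum(1 for k in xrange(1, n + 1) if gcd(n, k) == 1)
#   print ans, float(ans) / phi(ans)   # 510510 and a ratio just above 5.5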
| 3.96875 | 4 |
tests/utils/test_devices.py | jcao1022/tiflash | 1 | 12789587 | import os
import pytest
from tiflash.utils import devices
class TestDevices():
def test_get_devices_directory(self, t_env):
expected = os.path.normpath(t_env['CCS_PATH'] +
'/ccs_base/common/targetdb/devices')
result = devices.get_devices_directory(t_env['CCS_PATH'])
assert result == expected
def test_get_devicetypes(self, t_env):
result = devices.get_devicetypes(t_env['CCS_PATH'])
assert type(result) is list
def test_get_devicetype(self, t_env):
expected = "CC1350F128"
devicexml = os.path.normpath(t_env['CCS_PATH'] +
"/ccs_base/common/targetdb/devices"
"/cc1350f128.xml")
result = devices.get_devicetype(devicexml)
assert result == expected
def test_get_cpu_xml(self, t_env):
expected = os.path.normpath(t_env['CCS_PATH'] +
"/ccs_base/common/targetdb/cpus/"
"cortex_m3.xml")
device_xml = os.path.normpath(t_env['CCS_PATH'] +
"/ccs_base/common/targetdb/devices/"
"cc1350f128.xml")
result = devices.get_cpu_xml(device_xml, t_env['CCS_PATH'])
assert result == expected
def test_get_default_connection_xml(self, t_env):
expected = os.path.normpath(t_env['CCS_PATH'] +
"/ccs_base/common/targetdb/connections/"
"TIXDS110_Connection.xml")
device_xml = os.path.normpath(t_env['CCS_PATH'] +
"/ccs_base/common/targetdb/devices/"
"cc1350f128.xml")
result = devices.get_default_connection_xml(device_xml,
t_env['CCS_PATH'])
assert result == expected
@pytest.mark.parametrize("serno,expected", [
("L100", "CC2650F128"),
("L110", "CC2652R1F"),
("L200", "CC1310F128"),
("L210", "CC1312R1F3"),
("L201", "CC1310F128"),
("L400", "CC1350F128"),
("L401", "CC1350F128"),
("L410", "CC1352R1F3"),
("L420", "CC1352P1F3"),
])
def test_get_device_from_serno(self, t_env, serno, expected):
result = devices.get_device_from_serno(serno, t_env['CCS_PATH'])
assert result == expected
| 2.140625 | 2 |
Petstagram/main/validators/image_size_validator.py | Pavel-Petkov03/Petstagram | 1 | 12789588 | <reponame>Pavel-Petkov03/Petstagram<filename>Petstagram/main/validators/image_size_validator.py
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
@deconstructible
class ValidateImage:
    def __init__(self, max_size):
        self.max_size = max_size  # maximum allowed size in megabytes
    def __call__(self, file):
        filesize = file.size
        if filesize > self.max_size * 1024 ** 2:  # self.max_size is in megabytes
            raise ValidationError(f"The file size must be at most {self.max_size} MB")
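
# Usage sketch (assumption -- the model and field names below are illustrative only):
#
#     class Pet(models.Model):
#         photo = models.ImageField(validators=[ValidateImage(5)])
#
# The validator is constructed with the size limit in megabytes and Django then
# calls it with each uploaded file; @deconstructible lets migrations serialize
# the configured instance.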
| 2.390625 | 2 |
client/xbee-client.py | ddaza/iothax | 0 | 12789589 | import serial
class Client:
serial_connection = serial.Serial(port="/dev/ttyUSB0", timeout=None)
def run(self):
while True:
try:
reading = self.serial_connection.read()
if reading == "":
continue
print "THIS> " + reading
except Exception as err:
print Exception, err
client = Client()
client.run()
| 2.75 | 3 |
py/legacyanalysis/talk-plots.py | manera/legacypipe | 32 | 12789590 | <filename>py/legacyanalysis/talk-plots.py
import matplotlib
matplotlib.use('Agg')
import pylab as plt
#from astrometry.util.fits import *
from legacypipe.runbrick import *
from legacypipe.runbrick_plots import *
from legacypipe.runbrick import _coadds, _get_mod
from astrometry.util.stages import *
from astrometry.util.starutil_numpy import *
from astrometry.sdss.dr9 import DR9
from astrometry.sdss import AsTransWrapper
from astrometry.libkd.spherematch import *
from scipy.ndimage.morphology import binary_dilation
from legacypipe.utils import MyMultiproc
def stage_plots(targetwcs=None, bands=None, W=None, H=None,
coimgs=None, cons=None, tims=None, blobs=None,
cat=None, T2=None, survey=None, **kwargs):
plt.figure(figsize=(8,4))
plt.subplots_adjust(left=0.08, right=0.99, bottom=0.12, top=0.99)
ll = np.linspace(0, 360, 1000)
bb = np.zeros_like(ll)
rg,dg = lbtoradec(ll, bb)
rg2,dg2 = lbtoradec(ll, bb + 10)
rg3,dg3 = lbtoradec(ll, bb - 10)
dall = LegacySurveyData(survey_dir='survey')
ccds = dall.get_ccds()
bricks = dall.get_bricks_readonly()
brick_coverage = dict()
for band in bands:
I = np.flatnonzero(ccds.filter == band)
plt.clf()
ccmap = dict(g='g', r='r', z='m')
plt.plot(ccds.ra[I], ccds.dec[I], '.', color=ccmap[band], alpha=0.1)
plt.plot(rg, dg, 'k-')
plt.plot(rg2, dg2, 'k-', alpha=0.5)
plt.plot(rg3, dg3, 'k-', alpha=0.5)
plt.axis([360,0,-12,36])
plt.xlabel('RA')
plt.ylabel('Dec')
plt.savefig('dr2-%s.png' % band)
II,J,d = match_radec(ccds.ra[I], ccds.dec[I], bricks.ra, bricks.dec,
np.hypot(0.25/2, 0.17))
J = np.unique(J)
hasband = np.zeros(len(bricks), bool)
hasband[J] = True
brick_coverage[band] = hasband
print 'Number of bricks with', band, 'coverage:', len(J)
print 'Bricks with grz coverage:', sum(reduce(np.logical_and, brick_coverage.values()))
T = T2
#mp = MyMultiproc(None, pool=pool)
mp = MyMultiproc(init=runbrick_global_init, initargs=[])
plt.figure(figsize=(6,6))
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
if True:
# SDSS coadd
sdsscoimgs,nil = sdss_coadd(targetwcs, bands)
plt.clf()
dimshow(get_rgb(sdsscoimgs, bands, **rgbkwargs), ticks=False)
plt.savefig('sdss.png')
C = _coadds(tims, bands, targetwcs)
coimgs = C.coimgs
rgb = get_rgb(coimgs, bands, **rgbkwargs)
plt.clf()
dimshow(rgb, ticks=False)
plt.savefig('img.png')
ax = plt.axis()
plt.plot(T.bx0, T.by0+1, 'o', mec=(0,1,0), mfc='none', ms=12)
plt.savefig('srcs.png')
plt.clf()
print 'Blobs:', blobs.dtype, blobs.min(), blobs.max()
#dimshow(rgb, ticks=False)
b0 = (blobs >= 0)
b1 = binary_dilation(b0, np.ones((3,3)))
bout = np.logical_and(b1, np.logical_not(b0))
blobrgb = rgb.copy()
# # set green
blobrgb[:,:,0][bout] = 0.
blobrgb[:,:,1][bout] = 1.
blobrgb[:,:,2][bout] = 0.
plt.clf()
dimshow(blobrgb, ticks=False)
plt.savefig('blobs.png')
plt.clf()
mods = mp.map(_get_mod, [(tim, cat) for tim in tims])
comods,nil = compute_coadds(tims, bands, targetwcs, images=mods)
dimshow(get_rgb(comods, bands, **rgbkwargs))
plt.savefig('mod.png')
nmods = []
resids = []
for tim,mod in zip(tims,mods):
noise = np.random.normal(size=tim.shape)
ie = tim.getInvError()
print 'ie min', ie[ie>0].min(), 'median', np.median(ie)
noise[ie > 0] *= (1. / ie[ie>0])
noise[ie == 0] = 0
nmods.append(mod + noise)
res = tim.getImage() - mod
res[ie == 0] = 0.
resids.append(res)
comods2,nil = compute_coadds(tims, bands, targetwcs, images=nmods)
dimshow(get_rgb(comods2, bands, **rgbkwargs))
plt.savefig('noisymod.png')
res,nil = compute_coadds(tims, bands, targetwcs, images=resids)
dimshow(get_rgb(res, bands, **rgbkwargs_resid))
plt.savefig('resids.png')
return dict(sdsscoimgs=sdsscoimgs, coimgs=coimgs,
comods=comods, comods2=comods2, resids=res)
def stage_plots2(sdsscoimgs=None, coimgs=None,
comods=None, comods2=None, resids=None,
bands=None,
**kwargs):
for band,co in zip(bands, sdsscoimgs):
print 'co', co.shape
plt.clf()
plt.hist(co.ravel(), range=(-0.1, 0.1), bins=100)
plt.title('SDSS %s band' % band)
plt.savefig('sdss-%s.png' % band)
print band, 'band 16th and 84th pcts:', np.percentile(co.ravel(), [16,84])
kwa = dict(mnmx=(-2,10), scales=dict(g=(2,0.02), r=(1,0.03),
z=(0,0.1)))
#z=(0,0.22)))
plt.clf()
dimshow(get_rgb(sdsscoimgs, bands, **kwa), ticks=False)
plt.savefig('sdss2.png')
plt.clf()
dimshow(get_rgb(coimgs, bands, **kwa), ticks=False)
plt.savefig('img2.png')
brick = '1498p017'
stages = ['plots2']
picklePattern = ('pickles/runbrick-z7-%(brick)s-%%(stage)s.pickle' %
dict(brick=brick))
stagefunc = CallGlobalTime('stage_%s', globals())
initargs = {}
kwargs = {}
prereqs = { 'plots': 'writecat',
'plots2': 'plots'}
for stage in stages:
runstage(stage, picklePattern, stagefunc, prereqs=prereqs, force=stages,
initial_args=initargs, write=True, **kwargs)
| 1.929688 | 2 |
listing_22-6.py | PrinceChou/Play-Python-with-Alisa | 0 | 12789591 | <filename>listing_22-6.py
# Listing_22-6.py
# Copyright Warren & <NAME>, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Using pickle to store a list to a file
import pickle
my_list = ['Fred', 73, 'Hello there', 81.9876e-13]
pickle_file = open('my_pickled_list.pkl', 'w')
pickle.dump(my_list, pickle_file)
pickle_file.close()
| 3.546875 | 4 |
tools/cfn-resource-list.py | PatMyron/aws-cfn-resource-specs | 25 | 12789592 | import json
from gzip import GzipFile
import os
from datetime import date
from pathlib import Path
from distutils.version import StrictVersion
# Third-Party modules
import requests
import git
from github import Github
from packaging import version
from boto3 import client, resource
def resource_spec_versions(cfn_resource_spec_bucket):
'''Return all available versions of a CFN Resource Specification file'''
versions = set()
# Create a client
conn = client('s3', region_name='us-east-1')
# Create a reusable Paginator
paginator = conn.get_paginator('list_objects')
# Create a PageIterator from the Paginator
page_iterator = paginator.paginate(Bucket=cfn_resource_spec_bucket)
for page in page_iterator:
for key in page['Contents']:
prefix,file = key['Key'].split('/',1)
versions.add(prefix)
return versions
def resource_spec_date(version, cfn_resource_spec_bucket, cfn_json):
'''Find the release date (last modified timestamp) of a CFN Resource Specification file'''
key = version + <KEY> + cfn_json
s3 = resource('s3',region_name='us-east-1')
obj = s3.Object(cfn_resource_spec_bucket, key)
return str(obj.last_modified.date())
def resource_download(resource_spec_dir, version, region_name, cfn_resource_spec_bucket, cfn_json, standard=True):
'''Download a CFN Resource Specification file'''
key = version + <KEY> + cfn_json
if standard:
resource('s3',region_name='us-east-1').Bucket(cfn_resource_spec_bucket).download_file(key, f"{resource_spec_dir}/{region_name}/{cfn_json}")
else:
extension = 'com'
if region_name.startswith('cn'):
extension += '.cn'
url = f"https://{cfn_resource_spec_bucket}.s3.{region_name}.amazonaws.{extension}/{version}/gzip/{cfn_json}"
print(f"[INFO][{region_name}][DOWNLOADING]Latest resource spec")
cfn_request = requests.get(url)
print(f"[INFO][{region_name}][DOWNLOADED]Latest resource spec")
with open(f"{resource_spec_dir}/{region_name}/{cfn_json}", 'wb') as cfn_download:
cfn_download.write(cfn_request.content)
return Path.cwd().joinpath(f"{resource_spec_dir}/{region_name}/{cfn_json}")
def resource_json_sort_keys(resource_json_file_path, gzipped = True):
# Open JSON
if gzipped:
with GzipFile(resource_json_file_path, 'r') as file_to_read:
json_contents = json.loads(file_to_read.read().decode('utf-8'))
else:
with open(resource_json_file_path, 'r') as file_to_read:
json_contents = json.loads(file_to_read.read())
# Rewrite downloaded JSON with sorted keys for proper git diff generation
with open(resource_json_file_path, 'w') as file_to_dump:
json.dump(json_contents, file_to_dump, indent=2, sort_keys=True)
resource_spec_dir = 'specs'
cfn_json = 'CloudFormationResourceSpecification.json'
debugging = False
if Path.cwd().joinpath("regions.json").exists():
with open(Path.cwd().joinpath("regions.json"), 'r') as file_to_read:
regions_data = json.loads(file_to_read.read())
supported_regions = []
oddballs = []
for key, value in regions_data.items():
supported_regions.append(key)
if value["normal_s3_download"] == False:
oddballs.append(key)
regions_data = None
if Path.cwd().joinpath("all-cfn-versions.json").exists():
with open(Path.cwd().joinpath("all-cfn-versions.json"), 'r') as file_to_read:
region_details_old = json.loads(file_to_read.read())
else:
region_details_old = {}
region_details={}
version_master=[]
updated_regions=[]
for region_name in supported_regions:
print(f"[INFO][{region_name}][SEARCHING]Querying for new versions")
Path.mkdir(Path.cwd().joinpath(f"{resource_spec_dir}/{region_name}"),exist_ok=True)
cfn_resource_spec_bucket=f"cfn-resource-specifications-{region_name}-prod"
if region_name not in oddballs:
cfn_resources = resource_spec_versions(
cfn_resource_spec_bucket
)
sorted_resources = list(cfn_resources)
sorted_resources.sort()
current_version = Path.cwd().joinpath(f"{resource_spec_dir}/{region_name}/{cfn_json}")
new_version_available = False
if current_version.exists():
with current_version.open('r') as cfn_content:
json_contents = json.loads(cfn_content.read())
old_version = json_contents['ResourceSpecificationVersion']
if sorted_resources[-2] != old_version:
new_version_available = True
else:
new_version_available = True
if new_version_available:
for version in sorted_resources[:-1]:
if (not region_details_old.get(region_name) or not region_details_old[region_name].get(version)) and version != 'latest':
spec_date = resource_spec_date(
version, cfn_resource_spec_bucket, cfn_json
)
if region_details_old.get(region_name) and region_details_old[region_name].get(version) and region_details_old[region_name][version] == spec_date:
break
print(f"[INFO][{region_name}][DISCOVERED]New Resource Spec: {version} - {spec_date}")
if region_details.get(region_name):
region_details[region_name][version] = spec_date
else:
region_details[region_name] = {version : spec_date}
if version not in version_master and version != 'latest':
version_master.append(version)
if region_name not in updated_regions:
updated_regions.append(region_name)
else:
current_version = Path.cwd().joinpath(f"{resource_spec_dir}/{region_name}/{cfn_json}")
if current_version.exists():
with current_version.open('r') as cfn_content:
json_contents = json.loads(cfn_content.read())
old_version = json_contents['ResourceSpecificationVersion']
else:
old_version = None
downloaded_cfn = resource_download(
resource_spec_dir, 'latest', region_name, cfn_resource_spec_bucket, cfn_json, False
)
# Sort keys in downloaded JSON
resource_json_sort_keys(downloaded_cfn, False)
with downloaded_cfn.open('r') as cfn_content:
json_contents = json.loads(cfn_content.read())
new_version = json_contents['ResourceSpecificationVersion']
if new_version != old_version and new_version != 'latest':
if new_version not in version_master:
version_master.append(new_version)
spec_date = None
if region_details_old.get(supported_regions[0]):
spec_date = region_details_old[supported_regions[0]].get(new_version)
#if not spec_date:
# spec_date = region_details['us-east-1'].get(new_version)
if not spec_date:
spec_date = f"{date.today():%Y-%m-%d}"
print(f"[INFO][{region_name}][DISCOVERED]New resource spec: {new_version} - {spec_date}")
region_details[region_name] = {new_version : spec_date}
updated_regions.append(region_name)
else:
print(f"[INFO][{region_name}][NO_CHANGE]Latest oddball resource spec: {new_version} is unchanged")
# Sort versions based on semantic versioning
version_master.sort(key=StrictVersion)
if updated_regions:
repo = git.Repo(f"{Path.cwd()}") # git repo base info
if not debugging:
git_email = os.environ['GITHUB_ACTOR'] + '@<EMAIL>'
git_name = os.environ['GITHUB_ACTOR']
github_repo = os.environ['GITHUB_REPOSITORY']
github_token = os.environ['GITHUB_TOKEN']
repo.git.config('--global', 'user.email', f"{git_email}")
repo.git.config('--global', 'user.name', f"{git_name}")
for version in version_master:
index = repo.index # current git head
changes_to_commit = []
for region_name in updated_regions:
included_regions = []
for version_key,date_value in region_details[region_name].items():
if version_key == version:
if region_name not in oddballs:
cfn_resource_spec_bucket=f"cfn-resource-specifications-{region_name}-prod"
downloaded_cfn = resource_download(
resource_spec_dir, version_key, region_name, cfn_resource_spec_bucket, cfn_json
)
# Sort keys in downloaded JSON
resource_json_sort_keys(downloaded_cfn)
print(f"[INFO][UPDATED]New Resource Spec for {region_name}: {version_key}")
else:
print(f"[INFO][UPDATED]New Resource Spec for {region_name}: {version_key}")
changes_to_commit.append(f"{resource_spec_dir}/{region_name}/{cfn_json}")
if not region_details_old.get(region_name):
region_details_old[region_name] = {version: date_value, 'latest': version}
else:
region_details_old[region_name][version] = date_value
region_details_old[region_name]['latest'] = version
commit_date_note = date_value
break
# Push all updated files of a particular version to Git repository
# with commit message and git tag of ResourceSpecificationVersion
index.add(changes_to_commit) # git add
if f"v{version}" not in [str(old_tag) for old_tag in repo.tags]:
index.commit(f"Version {version} {commit_date_note}") # git commit
new_tag = repo.create_tag(f"v{version}", message="New CFN Resource Spec release")
else:
index.commit(f"Version {version} {commit_date_note}") # git commit
new_tag = repo.create_tag(f"v{version}", force=True, message="New CFN Resource Spec release")
if debugging:
origin = repo.remotes[0]
origin.push()
else:
repo.git.config('--global', 'user.email', f"{git_email}")
repo.git.config('--global', 'user.name', f"{git_name}")
repo.git.remote('set-url', 'origin', "https://x-access-token:%[email protected]/%s" % (github_token, github_repo))
#repo.git.push('-f', 'origin', 'master')
#repo.git.tag('-a',f"v{version}", '-m', "New CFN Resource Spec release")
#repo.git.push('-f', 'origin', f"v{version}")
origin = repo.remotes[0]
origin.push()
origin.push(new_tag)
# Lastly, update the all-cfn-version.json with latest info
with open("all-cfn-versions.json", 'w') as file_to_dump:
json.dump(region_details_old, file_to_dump, indent=2, sort_keys=True)
# Update all-cfn-versions.json
index.add(["all-cfn-versions.json"]) # git add
index.commit(f"Latest Version {version_master[-1]}-{date.today():%Y%m%d} {commit_date_note}") # git commit
origin.push() # git push
| 2.3125 | 2 |
dicetables/tools/alias_table.py | eric-s-s/share-with-z | 5 | 12789593 | from collections import namedtuple
Alias = namedtuple('Alias', ['primary', 'alternate', 'primary_height'])
class AliasTable(object):
"""
here is
`a nice explanation of alias tables: <http://www.keithschwarz.com/darts-dice-coins/>`_
`Vose's algorithm <https://web.archive.org/web/20131029203736/http://web.eecs.utk.edu/~vose/Publications/random.pdf>`_
"""
def __init__(self, input_dict):
self._height = sum(input_dict.values())
self._length = len(input_dict)
self._aliases = self._create_aliases(input_dict)
def _create_aliases(self, input_dict):
big_heights, small_heights = self._get_height_sorted_lists(input_dict)
alias_list = []
while small_heights:
primary, primary_height = small_heights.pop()
alternate, alternate_height = big_heights.pop()
alias_list.append(Alias(primary=primary, alternate=alternate, primary_height=primary_height))
new_alternate_height = alternate_height - (self._height - primary_height)
self._update_sorting_lists(alternate, new_alternate_height, big_heights, small_heights)
while big_heights:
primary, _ = big_heights.pop()
alias_list.append(Alias(primary=primary, alternate=primary, primary_height=self._height))
return alias_list
def _update_sorting_lists(self, event, event_height, big_heights, small_heights):
if event_height < self._height:
small_heights.append((event, event_height))
else:
big_heights.append((event, event_height))
def _get_height_sorted_lists(self, input_dict):
less_than_height = []
greater_than_or_equal_height = []
for event, frequency in sorted(input_dict.items()):
event_height = self._length * frequency
self._update_sorting_lists(event, event_height, greater_than_or_equal_height, less_than_height)
return greater_than_or_equal_height, less_than_height
@property
def height(self):
return self._height
@property
def length(self):
return self._length
def to_list(self):
return self._aliases[:]
def get(self, length, height):
alias = self._aliases[length]
if height >= alias.primary_height:
return alias.alternate
return alias.primary
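# --- Illustrative usage (added sketch, not part of the original module) ---
# The constructor takes a mapping of events to integer frequencies; `get` turns a
# uniformly drawn (length, height) pair into an event with the correct probability,
# which is the point of the alias method described in the links above.
if __name__ == '__main__':
    from random import randrange

    table = AliasTable({1: 1, 2: 3})  # event 2 is three times as likely as event 1
    draws = [table.get(randrange(table.length), randrange(table.height))
             for _ in range(10000)]
    print(draws.count(2) / len(draws))  # expected to be close to 0.75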
| 3.84375 | 4 |
gen2.py | shubham303/SynthText | 0 | 12789594 | # Author: <NAME>
# Date: 2015
"""
Entry-point for generating synthetic text images, as described in:
@InProceedings{Gupta16,
author = "<NAME>. and <NAME>. and <NAME>.",
title = "Synthetic Data for Text Localisation in Natural Images",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
year = "2016",
}
"""
import os
import random
import tarfile
import wget
from common import *
from create_recognition_dataset import convert_floating_coordinates_to_int, order_points, \
get_string_representation_of_bbox
from synthgen import *
## Define some configuration variables:
NUM_IMG = -1 # no. of images to use for generation (-1 to use all available):
INSTANCE_PER_IMAGE = 10 # no. of times to use the same image
SECS_PER_IMG = 5
# path to the data-file, containing image, depth and segmentation:
# url of the data (google-drive public file):
DATA_URL = 'http://www.robots.ox.ac.uk/~ankush/data.tar.gz'
OUT_FILE = './SynthText_{}.h5'.format(configuration.lang)
def get_data(data_path):
"""
Download the image,depth and segmentation data:
Returns, the h5 database.
"""
DB_FNAME = osp.join(data_path, 'dset.h5')
if not osp.exists(DB_FNAME):
try:
colorprint(Color.BLUE, '\tdownloading data (56 M) from: ' + DATA_URL, bold=True)
print()
sys.stdout.flush()
out_fname = 'data.tar.gz'
wget.download(DATA_URL, out=out_fname)
tar = tarfile.open(out_fname)
tar.extractall()
tar.close()
os.remove(out_fname)
colorprint(Color.BLUE, '\n\tdata saved at:' + DB_FNAME, bold=True)
sys.stdout.flush()
except:
print(colorize(Color.RED, 'Data not found and have problems downloading.', bold=True))
sys.stdout.flush()
sys.exit(-1)
# open the h5 file and return:
return h5py.File(DB_FNAME, 'r')
def add_res_to_db(imgname, res, db):
"""
Add the synthetically generated text image instance
and other metadata to the dataset.
"""
ninstance = len(res)
for i in range(ninstance):
dname = "%s_%d" % (imgname, i)
db['data'].create_dataset(dname, data=res[i]['img'])
db['data'][dname].attrs['charBB'] = res[i]['charBB']
db['data'][dname].attrs['wordBB'] = res[i]['wordBB']
db['data'][dname].attrs['font'] = res[i]['font']
text_utf8 = [char.encode('utf8') for char in res[i]['txt']]
db['data'][dname].attrs['txt'] = text_utf8
def save_res_to_imgs(imgname, res, gt_file, out_dir):
"""
    Save the synthetically generated text image instances to disk and append
    their word boxes, text and fonts to the ground-truth file.
"""
ninstance = len(res)
for i in range(ninstance):
filename = "{}/{}_{}.jpg".format(os.path.basename(out_dir), imgname, i)
img_file_name = "{}/{}_{}.jpg".format(out_dir, imgname, i)
for j in range(len(res[i]["txt"])):
bb = convert_floating_coordinates_to_int(res[i]['wordBB'][:,:,j]).T
bb,_,_ = order_points(bb)
bb = get_string_representation_of_bbox(bb)
s = "{}\t{}\t{}\t{}".format(filename, bb,res[i]['txt'][j] ,res[i]['font'][j])
gt_file.write(s)
gt_file.write("\n")
# Swap bgr to rgb so we can save into image file
img = res[i]['img'][..., [2, 1, 0]]
cv2.imwrite(img_file_name, img)
@wrap(entering, exiting)
def main(data_path,depth_dir, img_dir, gt_file_name,out_dir, viz=False):
# open databases:
print(colorize(Color.BLUE, 'getting data..', bold=True))
print(colorize(Color.BLUE, '\t-> done', bold=True))
# open the output h5 file:
#out_db = h5py.File(OUT_FILE, 'w')
#out_db.create_group('/data')
#print(colorize(Color.GREEN, 'Storing the output in: ' + OUT_FILE, bold=True))
#img_db = h5py.File("./img_db.h5", "r")
depth_db = h5py.File("{}/depth.h5".format(depth_dir), 'r')
seg_db = h5py.File("{}/seg.h5".format(depth_dir), 'r')
# get the names of the image files in the dataset:
imnames = sorted(open("{}/image_names.txt".format(depth_dir)).readlines())
#imnames = sorted(img_db.keys())
N = len(imnames)
global NUM_IMG
if NUM_IMG < 0:
NUM_IMG = N
start_idx, end_idx = 0, min(NUM_IMG, N)
RV3 = RendererV3(data_path, max_time=SECS_PER_IMG)
gt_file = open("{}/{}".format(out_dir,gt_file_name), "w")
range_list= list(range(start_idx, end_idx))
random.shuffle(range_list)
for i in range(start_idx, end_idx):
imname = imnames[range_list[i]].strip()
try:
# get the image:
# img = Image.fromarray(db['image'][imname][:])
#img = Image.fromarray(img_db[imname][:])
# get the pre-computed depth:
# there are 2 estimates of depth (represented as 2 "channels")
# here we are using the second one (in some cases it might be
# useful to use the other one):
if imname not in depth_db:
continue
img = cv2.imread("{}/{}".format(img_dir , imname))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
depth = depth_db[imname][:]
# depth = depth[:, :, 0]
# get segmentation:
seg = seg_db["mask"][imname][:].astype('float32')
area = seg_db["mask"][imname].attrs['area']
label = seg_db["mask"][imname].attrs['label']
# re-size uniformly:
#sz = depth.shape[:2][::-1]
#img = np.array(img.resize(sz, Image.ANTIALIAS))
#seg = np.array(Image.fromarray(seg).resize(sz, Image.NEAREST))
from utils import io
#io.write_segm_img("seg.jpg", img, seg, alpha=0.5)
#io.write_depth('depth', depth)
# get_segmentation_crop(img, seg, label)
print(colorize(Color.RED, '%d of %d' % (i, end_idx - 1), bold=True))
res = RV3.render_text(img, depth, seg, area, label,
ninstance=INSTANCE_PER_IMAGE, viz=viz)
if len(res) > 0:
# non-empty : successful in placing text:
# add_res_to_db(imname, res, out_db)
save_res_to_imgs("{}_{}".format(gt_file_name[0:gt_file_name.find(".")],i), res, gt_file, out_dir)
gt_file.flush()
# visualize the output:
if viz:
save_res_to_imgs(imname, res, gt_file, out_dir)
# if 'q' in input(colorize(Color.RED,'continue? (enter to continue, q to exit): ',True)):
# break
except:
traceback.print_exc()
print(colorize(Color.GREEN, '>>>> CONTINUING....', bold=True))
continue
#out_db.close()
gt_file.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Genereate Synthetic Scene-Text Images')
parser.add_argument('--viz', action='store_true', dest='viz', default=False,
help='flag for turning on visualizations')
parser.add_argument('--lang',
help='Select language : ENG/HI')
parser.add_argument("--data_path", default="data/")
parser.add_argument('--text_source', default='newsgroup/newsgroup.txt', help="text_source")
parser.add_argument("--image_dir", default="./", help="path to images")
parser.add_argument("--depth_dir", default="./", help="path to depth map and seg map")
parser.add_argument("--gt_file", default="gt.txt", help="path to output gt file")
parser.add_argument("--out_dir", default="./output", help="path to output gt file")
args = parser.parse_args()
configuration.lang = args.lang
configuration.text_soruce = "newsgroup/newsgroup_{}.txt".format(args.lang)
configuration.fontlist_file = "fonts/fontlist/fontlist_{}.txt".format(args.lang)
configuration.char_freq_path = 'models/{}/char_freq.cp'.format(args.lang)
configuration.font_px2pt = 'models/{}/font_px2pt.cp'.format(args.lang)
OUT_FILE = './SynthText_{}.h5'.format(configuration.lang)
main(args.data_path,args.depth_dir,args.image_dir, args.gt_file,args.out_dir, args.viz)
# TODO remove this line. kept only for debugging during development.
# visualize_results.main('results/SynthText_{}.h5'.format(configuration.lang))
# create_recognition_dataset.main('results/SynthText_{}.h5'.format(configuration.lang))
| 2.53125 | 3 |
gp_mpc/model_class.py | DiegoAE/GP-MPC | 93 | 12789595 | <reponame>DiegoAE/GP-MPC<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Dynamic System Model
Copyright (c) 2018, <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pyDOE
import numpy as np
import casadi as ca
import scipy.linalg
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
class Model:
def __init__(self, Nx, Nu, ode, dt, R=None,
alg=None, alg_0=None, Nz=0, Np=0,
opt=None, clip_negative=False):
""" Initialize dynamic model
# Arguments:
Nx: Number of states
Nu: Number of inputs
ode: ode(x, u, z, p)
dt: Sampling time
# Arguments (optional):
R: Noise covariance matrix (Ny, Ny)
alg: alg(x, z, u)
alg_0: Initial value of algebraic variables
Nz: Number of algebraic states
Np: Number of parameters
opt: Options dict to pass to the IDEAS integrator
clip_negative: If true, clip negative simulated outputs to zero
"""
# Create a default noise covariance matrix
if R is None:
            self.__R = np.eye(Nx) * 1e-3
else:
self.__R = R
self.__dt = dt
self.__Nu = Nu
self.__Nx = Nx
self.__Nz = Nz
self.__Np = Np
self.__clip_negative = clip_negative
""" Create integrator """
# Integrator options
options = {
"abstol" : 1e-5,
"reltol" : 1e-9,
"max_num_steps": 100,
"tf" : dt,
}
if opt is not None:
options.update(opt)
x = ca.MX.sym('x', Nx)
u = ca.MX.sym('u', Nu)
z = ca.MX.sym('z', Nz)
p = ca.MX.sym('p', Np)
par = ca.vertcat(u, p)
dae = {'x': x, 'ode': ode(x,u,z,p), 'p':par}
if alg is not None:
self.__alg0 = ca.Function('alg_0', [x, u],
[alg_0(x, u)])
dae.update({'z':z, 'alg': alg(x, z, u)})
self.Integrator = ca.integrator('DEA_Integrator', 'idas', dae, options)
else:
self.Integrator = ca.integrator('ODE_Integrator', 'cvodes', dae, options)
#TODO: Fix discrete DAE model
if alg is None:
""" Create discrete RK4 model """
ode_casadi = ca.Function("ode", [x, u, p], [ode(x,u,z,p)])
k1 = ode_casadi(x, u, p)
k2 = ode_casadi(x + dt/2*k1, u, p)
k3 = ode_casadi(x + dt/2*k2, u, p)
k4 = ode_casadi(x + dt*k3,u, p)
xrk4 = x + dt/6*(k1 + 2*k2 + 2*k3 + k4)
self.rk4 = ca.Function("ode_rk4", [x, u, p], [xrk4])
# Jacobian of continuous system
self.__jac_x = ca.Function('jac_x', [x, u, p],
[ca.jacobian(ode_casadi(x,u,p), x)])
self.__jac_u = ca.Function('jac_x', [x, u, p],
[ca.jacobian(ode_casadi(x,u,p), u)])
# Jacobian of discrete RK4 system
self.__discrete_rk4_jac_x = ca.Function('jac_x', [x, u, p],
[ca.jacobian(self.rk4(x,u,p), x)])
self.__discrete_rk4_jac_u = ca.Function('jac_x', [x, u, p],
[ca.jacobian(self.rk4(x,u,p), u)])
# Jacobian of exact discretization
self.__discrete_jac_x = ca.Function('jac_x', [x, u, p],
[ca.jacobian(self.Integrator(x0=x,
p=ca.vertcat(u,p))['xf'], x)])
self.__discrete_jac_u = ca.Function('jac_u', [x, u, p],
[ca.jacobian(self.Integrator(x0=x,
p=ca.vertcat(u,p))['xf'], u)])
def linearize(self, x0, u0, p0=[]):
""" Linearize the continuous system around the operating point
dx/dt = Ax + Bu
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
A = np.array(self.__jac_x(x0, u0, p0))
B = np.array(self.__jac_u(x0, u0, p0))
return A, B
def discrete_linearize(self, x0, u0, p0=[]):
""" Linearize the exact discrete system around the operating point
x[k+1] = Ax[k] + Bu[k]
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
Ad = np.array(self.__discrete_jac_x(x0, u0, p0))
Bd = np.array(self.__discrete_jac_u(x0, u0, p0))
return Ad, Bd
def discrete_rk4_linearize(self, x0, u0, p0=[]):
""" Linearize the discrete rk4 system around the operating point
x[k+1] = Ax[k] + Bu[k]
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
Ad = np.array(self.__discrete_rk4_jac_x(x0, u0, p0))
Bd = np.array(self.__discrete_rk4_jac_u(x0, u0, p0))
return Ad, Bd
def rk4_jacobian_x(self, x0, u0, p0=[]):
""" Return state jacobian evaluated at the operating point
x[k+1] = Ax[k] + Bu[k]
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
return self.__discrete_rk4_jac_x(x0, u0, p0)
def rk4_jacobian_u(self, x0, u0, p0=[]):
""" Return input jacobian evaluated at the operating point
x[k+1] = Ax[k] + Bu[k]
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
return self.__discrete_rk4_jac_u(x0, u0, p0)
def check_rk4_stability(self, x0, u0, d=.1, plot=False):
""" Check if Runga Kutta 4 method is stable around operating point
# Return True if stable, False if not stable
"""
A, B = self.linearize(x0, u0, p0=[])
eigenvalues, eigenvec = scipy.linalg.eig(A)
h = self.sampling_time()
for eig in eigenvalues:
R = 1 + h*eig + (h*eig)**2/2 + (h*eig)**3/6 + (h*eig)**4/24
if np.abs(R) >= 1:
return False
# if plot:
# h = d
## N = 1000;
## th = np.linspace(0, 2*np.pi, N);
## r = np.exp(1j*th);
## f = lambda r: 1 + h*r + (h*r)**2/2 + (h*r)**3/6 + (h*r)**4/24
# plt.figure()
# x = np.arange(-3.0, 3.0, 0.01)
# y = np.arange(-3.0, 3.0, 0.01)
# X, Y = np.meshgrid(x, y)
# print(h)
# z = X + 1j*Y;
# R = 1 + h*z + (h*z)**2/2 + (h*z)**3/6 + (h*z)**4/24
# print(R.shape)
# zlevel4 = abs(R);
# plt.contour(x,y, zlevel4)
# plt.show()
return True
def sampling_time(self):
""" Get the sampling time
"""
return self.__dt
def size(self):
""" Get the size of the model
# Returns:
Nx: Number of states
Nu: Number of inputs
Np: Number of parameters
"""
return self.__Nx, self.__Nu, self.__Np
def integrate(self, x0, u, p):
""" Integrate one time sample dt
# Arguments:
x0: Initial state vector
u: Input vector
p: Parameter vector
# Returns:
x: Numpy array with x at t0 + dt
"""
par=ca.vertcat(u, p)
        if self.__Nz != 0:
z0 = self.__alg0(x0, u)
out = self.Integrator(x0=x0, p=u, z0=z0)
else:
out = self.Integrator(x0=x0, p=par)
return np.array(out["xf"]).flatten()
#TODO: Fix this or remove
def set_method(self, method='exact'):
""" Select wich discrete time method to use """
def sim(self, x0, u, p=None, noise=False):
""" Simulate system
# Arguments:
x0: Initial state (Nx, 1)
u: Input matrix with the input for each timestep in the
simulation horizon (Nt, Nu)
p: Parameter matrix with the parameters for each timestep
in the simulation horizon (Nt, Np)
noise: If True, add gaussian noise using the noise covariance matrix
# Output:
Y_sim: Matrix with the simulated outputs (Nt, Ny)
"""
Nt = np.size(u, 0)
# Initial state of the system
x = x0
# Predefine matrix to collect noisy state outputs
Y = np.zeros((Nt, self.__Nx))
for t in range(Nt):
u_t = u[t, :] # control input for simulation
if p is not None:
p_t = p[t, :] # parameter at step t
else:
p_t = []
try:
x = self.integrate(x, u_t, p_t).flatten()
except RuntimeError:
print('----------------------------------------')
print('** System unstable, simulator crashed **')
print('** t: %d **' % t)
print('----------------------------------------')
return Y
Y[t, :] = x
# Add normal white noise to state outputs
if noise:
Y[t, :] += np.random.multivariate_normal(
np.zeros((self.__Nx)), self.__R)
# Limit values to above 1e-8 to avvoid to avvoid numerical errors
if self.__clip_negative:
if np.any(Y < 0):
print('Clipping negative values in simulation!')
Y = Y.clip(min=1e-6)
return Y
def generate_training_data(self, N, uub, ulb, xub, xlb,
pub=None, plb=None, noise=True):
""" Generate training data using latin hypercube design
# Arguments:
N: Number of data points to be generated
uub: Upper input range (Nu,1)
ulb: Lower input range (Nu,1)
xub: Upper state range (Ny,1)
xlb: Lower state range (Ny,1)
# Returns:
Z: Matrix (N, Nx + Nu) with state x and inputs u at each row
Y: Matrix (N, Nx) where each row is the state x at time t+dt,
with the input from the same row in Z at time t.
"""
# Make sure boundry vectors are numpy arrays
uub = np.array(uub)
ulb = np.array(ulb)
xub = np.array(xub)
xlb = np.array(xlb)
# Predefine matrix to collect noisy state outputs
Y = np.zeros((N, self.__Nx))
# Create control input design using a latin hypecube
# Latin hypercube design for unit cube [0,1]^Nu
if self.__Nu > 0:
U = pyDOE.lhs(self.__Nu, samples=N, criterion='maximin')
# Scale control inputs to correct range
for k in range(N):
U[k, :] = U[k, :] * (uub - ulb) + ulb
else:
U = []
# Create state input design using a latin hypecube
# Latin hypercube design for unit cube [0,1]^Ny
X = pyDOE.lhs(self.__Nx, samples=N, criterion='maximin')
# Scale state inputs to correct range
for k in range(N):
X[k, :] = X[k, :] * (xub - xlb) + xlb
# Create parameter matrix
par = pyDOE.lhs(self.__Np, samples=N)
if pub is not None:
for k in range(N):
par[k, :] = par[k, :] * (pub - plb) + plb
for i in range(N):
if self.__Nu > 0:
u_t = U[i, :] # control input for simulation
else:
u_t = []
x_t = X[i, :] # state input for simulation
p_t = par[i, :] # parameter input for simulation
# Simulate system with x_t and u_t inputs for deltat time
Y[i, :] = self.integrate(x_t, u_t, p_t)
# Add normal white noise to state outputs
if noise:
Y[i, :] += np.random.multivariate_normal(
np.zeros((self.__Nx)), self.__R)
# Concatenate previous states and inputs to obtain overall input to GP model
if self.__Nu > 0:
Z = np.hstack([X, U])
else:
Z = X
return Z, Y
def plot(self, x0, u, numcols=2):
""" Simulate and plot model
# Arguments:
x0: Initial state
u: Matrix with inputs for all time steps (Nt, Nu)
numcols: Number of columns in the plot
"""
y = self.sim(x0, u, noise=True)
Nt = np.size(u, 0)
t = np.linspace(0.0, (Nt - 1)* self.__dt, Nt )
numrows = int(np.ceil(self.__Nx / numcols))
fig_x = plt.figure()
for i in range(self.__Nx):
ax = fig_x.add_subplot(numrows, numcols, i + 1)
ax.plot(t, y[:, i], 'b-', marker='.', linewidth=1.0)
ax.set_ylabel('x_' + str(i + 1))
ax.set_xlabel('Time')
fig_x.canvas.set_window_title('Model simulation')
plt.show()
def predict_compare(self, x0, u, num_cols=2, xnames=None, title=None,):
""" Predict and compare dicrete RK4 model and linearized model against
the exact model.
"""
# Predict future
Nx = self.__Nx
dt = self.sampling_time()
Nt = np.size(u, 0)
sim_time = Nt * dt
# Exact model with no noise
y_exact = self.sim(x0, u, noise=False)
y_exact = np.vstack([x0, y_exact])
# RK4
y_rk4 = np.zeros((Nt + 1 , Nx))
y_rk4[0] = x0
for t in range(Nt):
y_rk4[t + 1]= np.array(self.rk4(y_rk4[t], u[t-1, :], [])).reshape((Nx,))
# Linearized Model of Exact discretization
Ad, Bd = self.discrete_linearize(x0, u[0])
y_lin = np.zeros((Nt + 1, Nx))
y_lin[0] = x0
for t in range(Nt):
y_lin[t+1] = Ad @ y_lin[t] + Bd @ u[t]
# Linearized Model of RK4 discretization
Ad, Bd = self.discrete_rk4_linearize(x0, u[0])
y_rk4_lin = np.zeros((Nt + 1, Nx))
y_rk4_lin[0] = x0
for t in range(Nt):
y_rk4_lin[t+1] = Ad @ y_rk4_lin[t] + Bd @ u[t]
t = np.linspace(0.0, sim_time, Nt + 1)
num_rows = int(np.ceil(Nx / num_cols))
if xnames is None:
xnames = ['State %d' % (i + 1) for i in range(Nx)]
fontP = FontProperties()
fontP.set_size('small')
fig = plt.figure(figsize=(9.0, 6.0))
for i in range(Nx):
ax = fig.add_subplot(num_rows, num_cols, i + 1)
ax.plot(t, y_exact[:, i], 'b-', label='Exact')
ax.plot(t, y_rk4[:, i], 'r-', label='RK4')
# ax.plot(t, y_lin[:, i], 'g--', label='Linearized')
# ax.plot(t, y_lin[:, i], 'y--', label='Linearized RK4')
ax.set_ylabel(xnames[i])
ax.legend(prop=fontP, loc='best')
ax.set_xlabel('Time [s]')
if title is not None:
fig.canvas.set_window_title(title)
else:
fig.canvas.set_window_title('Compare approximations of system model')
plt.tight_layout()
plt.show()
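# --- Illustrative usage (added sketch, not part of the original class) ---
# A minimal first-order linear system dx/dt = -x + u simulated for 10 steps of dt=0.1.
# The ode callable takes (x, u, z, p) as required by __init__; the names below are made up.
if __name__ == '__main__':
    demo_model = Model(Nx=1, Nu=1, ode=lambda x, u, z, p: -x + u, dt=0.1)
    u_seq = np.ones((10, 1))                      # constant unit input
    y_demo = demo_model.sim(np.array([0.0]), u_seq, noise=False)
    print(y_demo[-1])                             # converges toward the steady state x = 1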
| 2.453125 | 2 |
tests/core/middleware/test_request_param_normalizer.py | y19818/web3.py | 0 | 12789596 | <gh_stars>0
import pytest
from web3 import Web3
from web3.middleware import ( # noqa: F401
construct_result_generator_middleware,
request_parameter_normalizer,
)
from web3.providers.base import (
BaseProvider,
)
@pytest.fixture
def w3_base():
return Web3(provider=BaseProvider(), middlewares=[])
@pytest.fixture
def result_generator_middleware():
return construct_result_generator_middleware({
'vns_getLogs': lambda _, params: params,
})
@pytest.fixture
def w3(w3_base, result_generator_middleware):
w3_base.middleware_onion.add(result_generator_middleware)
w3_base.middleware_onion.add(request_parameter_normalizer)
return w3_base
def test_vns_getLogs_param_normalization(w3):
result = w3.vns.getLogs({
'from': 'latest', 'address': '0x1111111111111111111111111111111111111111'})
assert isinstance(result[0]['address'], list)
| 1.773438 | 2 |
Information Security/Information Security Projects/Port Scanner/port_scanner.py | Fradxyz/FCCProjects | 0 | 12789597 | from common_ports import ports_and_services
import socket, re
def get_open_ports(target, port_range, verbose=None):
open_ports = []
ports = range(port_range[0], port_range[1] + 1)
try:
host = socket.gethostbyname(target)
except:
if re.search('[a-zA-Z]', target):
return 'Error: Invalid hostname'
else:
return 'Error: Invalid IP address'
if host == target:
try:
domain = socket.gethostbyaddr(host)[0]
except:
domain = 'not found'
else:
domain = target
for port in ports:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
s.connect((host, port))
open_ports.append(port)
except:
pass
if verbose:
if domain == 'not found':
verbose_str = f'Open ports for {host}\nPORT SERVICE\n'
else:
verbose_str = f'Open ports for {domain} ({host})\nPORT SERVICE\n'
for port in open_ports:
verbose_str += f'{port} {ports_and_services[port]}\n'
return verbose_str[:-1] # remove newline at end
else:
return open_ports
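# --- Illustrative usage (added sketch, not part of the original module) ---
# Scans a small local port range; the default form returns a list of open ports,
# the verbose form returns the formatted report string built above.
if __name__ == '__main__':
    print(get_open_ports('127.0.0.1', [75, 85]))
    print(get_open_ports('127.0.0.1', [75, 85], verbose=True))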
| 2.890625 | 3 |
Exercise_14_PassowrdGenerator.py | lukas9557/learning_python | 2 | 12789598 | <filename>Exercise_14_PassowrdGenerator.py
#Strong password generator
lowercaseLetters = list()
uppercaseLetters = list()
numbers = list()
special = list()
all = list()
output = str()
import string
lowercaseLetters = list(string.ascii_lowercase)
uppercaseLetters = list(string.ascii_uppercase)
numbers = ["0","1","2","3","4","5","6","7","8","9"]
special = list(string.punctuation)
all = lowercaseLetters + uppercaseLetters + numbers + special
n = len(all)
x = int(input("How many characters should your password have? "))
import random
for i in range(x):
    y = random.randint(0, n - 1)
output = output + all[y]
print("Your password is: ", output)
| 4 | 4 |
tests/test_inferences.py | Edwardp17/causalimpact-1 | 0 | 12789599 | <filename>tests/test_inferences.py
# MIT License
#
# Copyright (c) 2018 Dafiti OpenSource
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_process import ArmaProcess
import pytest
from causalimpact import CausalImpact
from causalimpact.inferences import Inferences
@pytest.fixture
def inferer():
return Inferences(10)
def test_inferer_cto():
inferer = Inferences(10)
assert inferer.n_sims == 10
assert inferer.inferences is None
assert inferer.p_value is None
def test_p_value_read_only(inferer):
with pytest.raises(AttributeError):
inferer.p_value = 0.4
inferer.p_value = 0.3
def test_p_value_bigger_than_one(inferer):
with pytest.raises(ValueError):
inferer.p_value = 2
def test_p_value_lower_than_zero(inferer):
with pytest.raises(ValueError):
inferer.p_value = -1
def test_inferences_read_only(inferer):
with pytest.raises(AttributeError):
inferer.inferences = pd.DataFrame([1, 2, 3])
inferer.inferences = pd.DataFrame([1, 2, 3])
def test_inferences_raises_invalid_input(inferer):
with pytest.raises(ValueError):
inferer.inferences = 1
def test_default_causal_cto_w_positive_signal():
np.random.seed(1)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=(100))
y[70:] += 1
data = pd.DataFrame({'y': y, 'X': X}, columns=['y', 'X'])
ci = CausalImpact(data, [0, 69], [70, 99])
assert ci.p_value < 0.05
def test_causal_cto_w_positive_signal_no_standardization():
np.random.seed(1)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=(100))
y[70:] += 1
data = pd.DataFrame({'y': y, 'X': X}, columns=['y', 'X'])
ci = CausalImpact(data, [0, 69], [70, 99], standardize=False)
assert ci.p_value < 0.05
def test_default_causal_cto_w_negative_signal():
np.random.seed(1)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=(100))
y[70:] -= 1
data = pd.DataFrame({'y': y, 'X': X}, columns=['y', 'X'])
ci = CausalImpact(data, [0, 69], [70, 99])
assert ci.p_value < 0.05
def test_causal_cto_w_negative_signal_no_standardization():
np.random.seed(1)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=(100))
y[70:] -= 1
data = pd.DataFrame({'y': y, 'X': X}, columns=['y', 'X'])
ci = CausalImpact(data, [0, 69], [70, 99], standardize=False)
assert ci.p_value < 0.05
def test_default_causal_cto_no_signal():
np.random.seed(1)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=(100))
data = pd.DataFrame({'y': y, 'X': X}, columns=['y', 'X'])
ci = CausalImpact(data, [0, 69], [70, 99])
assert ci.p_value > 0.05
def test_lower_upper_percentile():
np.random.seed(1)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=(100))
data = pd.DataFrame({'y': y, 'X': X}, columns=['y', 'X'])
ci = CausalImpact(data, [0, 69], [70, 99])
ci.lower_upper_percentile == [2.5, 97.5]
| 1.679688 | 2 |
demos/datasource_csv.py | weldermarcosxd/agatereports | 1 | 12789600 | <reponame>weldermarcosxd/agatereports
# from agatereports.adapters.CSVAdapter import CSVAdapter
from agatereports.basic_report import BasicReport
import logging
logger = logging.getLogger(__name__)
def datasource_csv_sample(jrxml_filename='./jrxml/datasource_csv.jrxml', output_filename='./output/datasource_csv.pdf'):
"""
CSV data source sample.
"""
logger.info('running datasource csv sample')
# jrxml_filename = './jrxml/datasource_csv.jrxml' # input jrxml filename
# output_filename = './output/datasource_csv.pdf' # output pdf filename
# CSV datasource configuration
data_config = {'adapter': 'csv', 'filename': '../data/product.csv'}
pdf_page = BasicReport(jrxml_filename=jrxml_filename, output_filename=output_filename, data_config=data_config)
pdf_page.generate_report()
if __name__ == '__main__':
datasource_csv_sample()
| 2.5625 | 3 |
utils/table-rotation.py | vuminhduc97/TableDetect | 13 | 12789601 | <gh_stars>10-100
# -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# Name: table-rotation.py
# Author: wdf
# Date: 2019/7/7
# IDE: PyCharm
# Description:
# 1. HoughLines ——> get the rotation angle
# 2. warpAffine ——> affine(rotation)
# Takes a skewed input image and automatically straightens the whole image with an affine rotation
# Reference: https://blog.csdn.net/qq_37674858/article/details/80708393
# Usage:
# 1. input: raw image
#-------------------------------------------------------------------------------
import math
import cv2
import numpy as np
# Use the Hough line transform to get the angle of the longest detected line (the rotation angle)
# By default only that longest line is drawn
def get_rotation_angle(image, show_longest_line=True, show_all_lines=False):
    image = image.copy()  # work on a copy, because cv2.line() draws in place
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 150, apertureSize=3)  # Canny edges keep the Hough transform cheap
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=50, maxLineGap=60)  # these parameters matter a lot
    # minLineLength: lines shorter than this are ignored
    # maxLineGap: segments closer together than this are merged into a single line
    # cv2.HoughLinesP() is the probabilistic Hough transform: the full transform accumulates
    # every edge point, which stays expensive even after Canny, so it randomly samples a
    # subset of points instead, which is effectively a form of downsampling.
    lengths = []  # store the endpoints and length of every line
for line in lines:
x1, y1, x2, y2 = line[0]
        length = ((x1-x2)**2 + (y1-y2)**2)**0.5  # Pythagorean theorem: segment length
lengths.append([x1, y1, x2, y2, length])
# print(line, length)
if show_all_lines:
            cv2.line(image, (x1, y1), (x2, y2), (0, 0, 0), 2)  # draw every detected line (black)
    # draw the longest line
lengths.sort(key=lambda x: x[-1])
longest_line = lengths[-1]
print("longest_line: ",longest_line)
x1, y1, x2, y2, length= longest_line
if show_longest_line:
        cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)  # draw the longest line (red)
        cv2.imshow("longest", image)
    # compute the rotation angle of this line
    angle = math.atan((y2-y1)/(x2-x1))
    print("angle-radian:", angle)  # in radians
    angle = angle*(180 /math.pi)
    print("angle-degree:", angle)  # in degrees
return angle
# Rotate the whole image counter-clockwise by the given angle (without cropping away the corners)
def rotate_bound(image, angle):
    # rotation centre: the image centre by default
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
    # build the rotation matrix for the given angle
    # the maths behind it:
    #  https://blog.csdn.net/liyuan02/article/details/6750828
    M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)  # rotation matrix; scale 1.0 keeps the original size
# print("RotationMatrix2D:\n", M)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
    # compute the bounding size of the rotated image (to avoid cropping)
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
    # shift the rotation matrix accordingly (to avoid cropping)
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
print("RotationMatrix2D:\n", M)
    # apply the affine transform to get the rotated image
    return cv2.warpAffine(image, M, (nW, nH), borderMode=cv2.BORDER_REPLICATE)
    # borderMode=cv2.BORDER_REPLICATE pads with the edge pixel values
    # alternatively use borderValue=(255, 255, 255) to pad with a constant; (0, 0, 0) would be black
def main():
img_path = "./img1/IMG_20190723_162452.jpg"
# img_path = "./img/table-1.png"
img = cv2.imread(img_path)
angle = get_rotation_angle(img, show_longest_line=False, show_all_lines=False)
    imag = rotate_bound(img, angle)  # the key step
# cv2.imshow("raw",img)
# cv2.imshow('rotated', imag)
cv2.imwrite('rotated.png', imag)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
main() | 2.78125 | 3 |
tests/primers_test.py | martinghunt/viridian_workflow | 8 | 12789602 | import os
import pytest
from intervaltree import Interval
from viridian_workflow import primers
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "primers")
def test_AmpliconSet_from_json():
with pytest.raises(NotImplementedError):
primers.AmpliconSet.from_json("foo.json")
def test_AmpliconSet_from_tsv():
tsv_file = os.path.join(data_dir, "AmpliconSet_from_tsv.tsv")
got = primers.AmpliconSet.from_tsv(tsv_file)
primer1_l = primers.Primer("amp1_left_primer", "ACGTACGTAC", True, True, 100)
primer1_r = primers.Primer("amp1_right_primer", "TCTCTTCTCAG", False, False, 300)
primer2_l = primers.Primer("amp2_left_primer", "GGGCGCGTAGTC", True, True, 290)
primer2_r = primers.Primer("amp2_right_primer", "ATGCGCGTAAGCT", False, False, 500)
amp1 = primers.Amplicon("amp1", shortname=0)
amp1.add(primer1_l)
amp1.add(primer1_r)
amp2 = primers.Amplicon("amp2", shortname=1)
amp2.add(primer2_l)
amp2.add(primer2_r)
expect = {
"amp1": amp1,
"amp2": amp2,
}
assert got == expect
def test_AmpliconSet_from_tsv_viridian_workflow_format():
tsv_file = os.path.join(
data_dir, "AmpliconSet_from_tsv_viridian_workflow_format.tsv"
)
got = primers.AmpliconSet.from_tsv_viridian_workflow_format(tsv_file)
primer1_l = primers.Primer("amp1_left_primer", "ACGTACGTAC", True, True, 100)
primer1_r = primers.Primer("amp1_right_primer", "TCTCTTCTCAG", False, False, 300)
primer2_l = primers.Primer("amp2_left_primer", "GGGCGCGTAGTC", True, True, 290)
primer2_r = primers.Primer("amp2_right_primer", "ATGCGCGTAAGCT", False, False, 500)
primer2_r_alt = primers.Primer(
"amp2_right_primer_alt", "TGCGCGTAAGCTA", False, False, 501
)
amp1 = primers.Amplicon("amp1", shortname=0)
amp1.add(primer1_l)
amp1.add(primer1_r)
amp2 = primers.Amplicon("amp2", shortname=1)
amp2.add(primer2_l)
amp2.add(primer2_r)
amp2.add(primer2_r_alt)
expect = {
"amp1": amp1,
"amp2": amp2,
}
assert got == expect
def test_AmpliconSet_match():
tsv_file = os.path.join(data_dir, "AmpliconSet_match.amplicons.tsv")
amplicons = primers.AmpliconSet.from_tsv(tsv_file)
amplicon_set = primers.AmpliconSet("NAME", tolerance=5, tsv_file=tsv_file)
f = amplicon_set.match
assert f(0, 0) is None
assert f(0, 10000) is None
assert f(0, 100) is None
assert f(90, 100) is None
assert f(90, 150) is None
assert f(94, 150) is None
print("f(95, 150)", f(95, 150))
print("amp1:", amplicons["amp1"])
assert f(95, 150) == [amplicons["amp1"]]
assert f(96, 150) == [amplicons["amp1"]]
assert f(96, 315) == [amplicons["amp1"]]
assert f(96, 316) is None
assert f(110, 120) == [amplicons["amp1"]]
assert f(150, 350) is None
assert f(300, 400) == [amplicons["amp2"]]
| 2.21875 | 2 |
leetcode/0525 Duplicate Zeros.py | jaredliw/python-question-bank | 1 | 12789603 | class Solution(object):
def duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
# Runtime: 1116 ms
# Memory: 13.8 MB
idx = 0
while idx < len(arr):
if arr[idx] == 0:
for ptr in range(len(arr) - 1, idx, -1):
arr[ptr] = arr[ptr - 1]
idx += 1
idx += 1
class Solution(object):
def duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
# Runtime: 52 ms
# Memory: 13.6 MB
idx = 0
while idx < len(arr):
if arr[idx] == 0:
arr.pop()
arr.insert(idx + 1, 0)
idx += 1
idx += 1
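# Added note: the first approach shifts the tail manually for every zero, which is
# O(n^2) in the worst case; the second relies on list.pop() (O(1) at the end) and
# list.insert() (O(n)), so it is also quadratic overall, just faster in practice
# because the shifting happens inside CPython's C layer.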
| 3.453125 | 3 |
django_side/core/urls.py | tyronedamasceno/flask-vs-django-api | 0 | 12789604 | <filename>django_side/core/urls.py<gh_stars>0
from django.urls import path, include
from rest_framework import routers
from core import views
router = routers.DefaultRouter()
router.register('user_register', views.UserViewSet, base_name='user_register')
router.register('user', views.ProtectedUserViewSet, base_name='user')
router.register('login', views.LoginViewSet, base_name='login')
app_name = 'core'
urlpatterns = [
path('', include(router.urls))
]
| 1.84375 | 2 |
redpanda/template/entitytemplate.py | fueler/redpanda | 0 | 12789605 |
class EntityTemplate():
def __init__(self) -> None:
self._name: str = ''
self._spritesheet: str = ''
self._width: int = 0
self._height: int = 0
def __repr__(self) -> str:
return f'{self._name}: sprite:{self._spritesheet}'
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, new_name: str) -> None:
self._name = new_name
@property
def spritesheet(self) -> str:
return self._spritesheet
@spritesheet.setter
def spritesheet(self, new_spritesheet: str) -> None:
self._spritesheet = new_spritesheet
@property
def width(self) -> int:
return self._width
@width.setter
def width(self, new_width: int) -> None:
self._width = new_width
@property
def height(self) -> int:
return self._height
@height.setter
def height(self, new_height: int) -> None:
self._height = new_height
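# --- Illustrative usage (added sketch; the attribute values below are made up) ---
if __name__ == '__main__':
    template = EntityTemplate()
    template.name = 'red_panda'
    template.spritesheet = 'assets/red_panda.png'
    template.width, template.height = 32, 32
    print(template)  # red_panda: sprite:assets/red_panda.png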
| 2.875 | 3 |
test/test_common.py | FPEPOSHI/PyBoof | 36 | 12789606 | <reponame>FPEPOSHI/PyBoof
#!/usr/bin/env python3
import unittest
from pyboof import gateway
import pyboof as pb
import numpy as np
pb.init_memmap()
class JavaData(unittest.TestCase):
def test_is_java_class(self):
array_class = gateway.jvm.java.util.ArrayList().getClass()
vector_class = gateway.jvm.java.util.Vector().getClass()
self.assertFalse(pb.is_java_class(array_class, "java.util.Vector"))
self.assertTrue(pb.is_java_class(vector_class, "java.util.Vector"))
def test_ejml_matrix_d_to_f(self):
mat_d = gateway.jvm.org.ejml.data.DMatrixRMaj(5, 4)
mat_f = pb.ejml_matrix_d_to_f(mat_d)
self.assertEqual(5, mat_f.getNumRows())
self.assertEqual(4, mat_f.getNumCols())
self.assertTrue(pb.is_java_class(mat_f.getClass(), "org.ejml.data.FMatrixRMaj"))
class ArrayTransfer(unittest.TestCase):
def test_mmap_array_python_to_java_U8(self):
pyarray = np.uint8([1, 0, 255, 100, 199])
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_U8)
self.assertEqual(5, len(jarray))
for i in range(len(pyarray)):
self.assertEqual(pyarray[i], np.uint8(jarray[i]))
def test_mmap_array_python_to_java_S8(self):
pyarray = [1, 0, -120, 100, -20]
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_S8)
self.assertEqual(5, len(jarray))
for i in range(len(pyarray)):
self.assertEqual(pyarray[i], np.int8(jarray[i]))
def test_mmap_array_python_to_java_S32(self):
pyarray = [1, 0, 1999394, -10, -99384]
# pyarray = np.int32([1, 0, 1999394, -10, -99384])
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_S32)
self.assertEqual(5, len(jarray))
for i in range(len(pyarray)):
self.assertEqual(pyarray[i], np.int32(jarray[i]))
def test_mmap_array_python_to_java_F32(self):
pyarray = np.float32([1.0, 0.0, 1.059e3, -102.034, -9.3243])
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_F32)
self.assertEqual(5, len(jarray))
for i in range(len(pyarray)):
self.assertEqual(pyarray[i], np.float32(jarray[i]))
def test_mmap_array_java_to_python_U8(self):
pyarray = np.uint8([1, 0, 255, 100, 199])
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_U8)
pyfound = pb.mmap_array_java_to_python(jarray , pb.MmapType.ARRAY_U8)
self.assertEqual(5, len(pyfound))
for i in range(len(pyfound)):
self.assertEqual(pyarray[i], pyfound[i])
def test_mmap_array_java_to_python_S8(self):
pyarray = [1, 0, -120, 100, -20]
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_S8)
pyfound = pb.mmap_array_java_to_python(jarray , pb.MmapType.ARRAY_S8)
self.assertEqual(5, len(pyfound))
for i in range(len(pyfound)):
self.assertEqual(pyarray[i], pyfound[i])
def test_mmap_array_java_to_python_S32(self):
pyarray = [1, 0, 1999394, -10, -99384]
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_S32)
pyfound = pb.mmap_array_java_to_python(jarray , pb.MmapType.ARRAY_S32)
self.assertEqual(5, len(pyfound))
for i in range(len(pyfound)):
self.assertEqual(np.int32(pyarray[i]), pyfound[i])
def test_mmap_array_java_to_python_F32(self):
pyarray = [1.0, 0.0, 1.059e3, -102.034, -9.3243]
jarray = pb.mmap_array_python_to_java(pyarray, pb.MmapType.ARRAY_F32)
pyfound = pb.mmap_array_java_to_python(jarray , pb.MmapType.ARRAY_F32)
self.assertEqual(5, len(pyfound))
for i in range(len(pyfound)):
self.assertEqual(np.float32(pyarray[i]), pyfound[i])
if __name__ == '__main__':
unittest.main()
| 2.234375 | 2 |
dakara_server/users/fields.py | DakaraProject/dakara-server | 4 | 12789607 | from django.db import models
class CaseInsensitiveFieldMixin:
"""Field mixin that uses case-insensitive lookup alternatives if they exist.
See: https://concisecoder.io/2018/10/27/case-insensitive-fields-in-django-models/
"""
LOOKUP_CONVERSIONS = {
"exact": "iexact",
"contains": "icontains",
"startswith": "istartswith",
"endswith": "iendswith",
"regex": "iregex",
}
def get_lookup(self, lookup_name):
converted = self.LOOKUP_CONVERSIONS.get(lookup_name, lookup_name)
return super().get_lookup(converted)
class CaseInsensitiveCharField(CaseInsensitiveFieldMixin, models.CharField):
"""Case insensitive char field."""
class CaseInsensitiveEmailField(CaseInsensitiveFieldMixin, models.EmailField):
"""Case insensitive email field."""
| 2.46875 | 2 |
app/books/migrations/0010_auto_20200317_0846.py | kaantecik/yazlab2_p_1 | 0 | 12789608 | # Generated by Django 3.0.4 on 2020-03-17 08:46
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0009_auto_20200315_2015'),
]
operations = [
migrations.AddField(
model_name='book',
name='is_expired',
field=models.BooleanField(blank=True, null=True),
),
migrations.AlterField(
model_name='book',
name='deadline_date',
field=models.DateTimeField(default=datetime.datetime(2020, 3, 24, 11, 46, 27, 369346), null=True),
),
migrations.AlterField(
model_name='book',
name='upload_date',
field=models.DateTimeField(default=datetime.datetime(2020, 3, 17, 11, 46, 27, 369411)),
),
]
| 1.617188 | 2 |
intervals.py | uaraven/paperclock | 1 | 12789609 | import threading
class repeating:
def __init__(self, interval_calculator):
self.interval = interval_calculator
def __call__(self, func):
def wrapped(*args):
func(*args)
sleep_for = self.interval()
threading.Timer(sleep_for, lambda: wrapped(*args)).start()
return wrapped
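# --- Illustrative usage (added sketch; the function and argument names are made up) ---
# The decorator re-schedules the wrapped function after every call, sleeping for
# whatever the interval_calculator returns (a constant 60 seconds here):
#
# @repeating(lambda: 60)
# def refresh_display(clock):
#     clock.draw()
#
# refresh_display(clock)  # runs immediately, then again every 60 seconds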
| 3.28125 | 3 |
interview/leet/399_Evaluate_Division_v2.py | eroicaleo/LearningPython | 1 | 12789610 | #!/usr/bin/env python
class Solution:
def calcEquation(self, equations, values, queries):
graph, ret = {}, []
for st, v in zip(equations, values):
s, t = st
graph.setdefault(s, []).append((t, v))
graph.setdefault(t, []).append((s, 1.0/v))
def dfs(s, q):
if s not in graph or s in visited:
return -1.0
visited.add(s)
for t, w in graph[s]:
if t == q:
return w
u = dfs(t, q)
if u > 0:
return w*u
return -1.0
for s, t in queries:
visited = set()
ret.append(dfs(s, t))
return ret
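# Added note: the graph stores every ratio in both directions (a/b and its reciprocal
# b/a), so a query s/t is answered by multiplying edge weights along a DFS path from
# s to t; -1.0 is returned when either variable is unknown or no path exists.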
equations = [ ["a", "b"], ["b", "c"] ]
values = [2.0, 3.0]
queries = [ ["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"] ]
equations = [["x1","x2"],["x2","x3"],["x3","x4"],["x4","x5"]]
values = [3.0,4.0,5.0,6.0]
queries = [["x1","x5"],["x5","x2"],["x2","x4"],["x2","x2"],["x2","x9"],["x9","x9"]]
sol = Solution()
print(sol.calcEquation(equations, values, queries))
| 3.359375 | 3 |
source/GA/genetic_pipeline.py | JessikaSmith/AutomatedTrainTestSplit | 0 | 12789611 | <gh_stars>0
from data import *
from source.GA import Selector, Mutation, Crossover
from vis_tools import *
from model import QRNN_model
import logging
import numpy as np
import operator
import pandas as pd
dataset = pd.read_csv('/data/GAforAutomatedTrainTestSplit/data/dataset/dataset.csv')
verification = pd.read_csv('/data/GAforAutomatedTrainTestSplit/data/dataset/verification.csv')
qrnn_model = QRNN_model(dataset)
def init_logger(function_name):
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%d-%m-%Y %H:%M',
filename='../logs/genetic_pipe.log',
filemode='a'
)
logger = logging.getLogger(function_name)
return logger
# TODO: add optimization
# The fitness function evaluates recall, precision or F-measure of the
# performance on the verification dataset;
# keep the '-' sign: the optimizer minimizes fitness, so we negate to maximize the metric.
def _evaluate_fitness(permutation, objective='f1'):
assert objective in ['recall', 'precision', 'f1']
# TODO: remove coefficients hardcore
train_set = dataset.iloc[0:round(len(permutation) * 0.55), :]
test_set = dataset.iloc[round(len(permutation) * 0.55):round(len(permutation) * 0.7), :]
if objective == 'recall':
pass
if objective == 'precision':
pass
if objective == 'f1':
X_train = train_set['text'].tolist()
y_train = train_set['label'].tolist()
X_test = test_set['text'].tolist()
y_test = test_set['label'].tolist()
qrnn_model.fit(X_train=X_train, y_train=y_train,
X_test=X_test, y_test=y_test)
# TODO: save results to log
result = qrnn_model.evaluate_on_verification(verification=verification)
return -result
class Permutation:
def __init__(self, permutation):
self.permutation = permutation
self.fitness = _evaluate_fitness(permutation)
self._prob = None
def update_permutation(self, new_permutation):
        self.permutation = new_permutation
self.fitness = _evaluate_fitness(new_permutation)
class GAPipeline:
def __init__(self, num_of_trials=100, population_size=12, best_perc=0.3,
mutation_probability=0.4):
self.number_of_trials = num_of_trials
self.population_size = population_size
self.best_perc = best_perc
self.mutation_probability = mutation_probability
# TODO: no need to load it here
self.size = dataset.shape[0]
# loggers initialization
self.run_logger = init_logger('run')
def _generate_population(self):
population = []
for _ in range(self.population_size):
path = np.random.permutation([i for i in range(self.size)])
population.append(Permutation(path))
return population
# TODO: log best result so far with params
def run(self):
self.run_logger.debug('I am running')
self.run_logger.info('Initializing population...')
population = self._generate_population()
s = Selector(selection_type='roulette')
c = Crossover(crossover_type='ordered')
m = Mutation(mutation_type='rsm')
x, y = [], []
for ii in range(self.number_of_trials):
population.sort(key=operator.attrgetter('fitness'), reverse=False)
new_generation = []
for i in range(int(self.population_size * self.best_perc)):
new_generation.append(population[i])
pairs_generator = s.selection(population=population, best_perc=self.best_perc)
for i, j in pairs_generator:
child_1, child_2 = c.crossover(parent_1=i.permutation, parent_2=j.permutation)
new_generation.append(Permutation(child_1))
new_generation.append(Permutation(child_2))
population = new_generation[:self.population_size]
for i in range(1, len(population)):
population[i].update_permutation(m.mutation(population[i].permutation,
mutation_probability=self.mutation_probability))
population.sort(key=operator.attrgetter('fitness'), reverse=False)
self.run_logger.info('Generation %s best so far %s' % (ii, population[0].fitness))
            self.run_logger.debug('Best permutation: %s' % (' '.join(map(str, population[0].permutation))))
x.append(ii)
y.append(population[0].fitness)
draw_convergence(x, y, 'ps = %s, bp = %s, mr = %s' % (
round(self.population_size, 2), round(self.best_perc, 2),
round(self.mutation_probability, 2)))
return population[0].fitness
ga = GAPipeline()
ga.run() | 2.34375 | 2 |
niddk_covid_sicr/stats.py | cannin/covid-sicr-test | 0 | 12789612 | """Compute stats on the results."""
import arviz as az
from datetime import datetime
import numpy as np
import pandas as pd
from pathlib import Path
from pystan.misc import _summary
from scipy.stats import nbinom
from tqdm.auto import tqdm
from warnings import warn
from .io import extract_samples
def get_rhat(fit) -> float:
"""Get `rhat` for the log-probability of a fit.
This is a measure of the convergence across sampling chains.
Good convergence is indicated by a value near 1.0.
"""
x = _summary(fit, ['lp__'], [])
summary = pd.DataFrame(x['summary'], columns=x['summary_colnames'], index=x['summary_rownames'])
return summary.loc['lp__', 'Rhat']
def get_waic(samples: pd.DataFrame) -> dict:
"""Get the Widely-Used Information Criterion (WAIC) for a fit.
Only use if you don't have arviz (`get_waic_and_loo` is preferred).
Args:
samples (pd.DataFrame): Samples extracted from a fit.
Returns:
dict: WAIC and se of WAIC for these samples
"""
from numpy import log, exp, sum, mean, var, sqrt
# I named the Stan array 'llx'
ll = samples[[c for c in samples if 'llx' in c]]
n_samples, n_obs = ll.shape
# Convert to likelihoods (pray for no numeric precision issues)
like = exp(ll)
# log of the mean (across samples) of the likelihood for each observation
lml = log(mean(like, axis=0))
# Sum (across observations) of lml
lppd = sum(lml)
# Variance (across samples) of the log-likelihood for each observation
vll = var(ll, axis=0)
# Sum (across observations) of the vll
pwaic = sum(vll)
elpdi = lml - vll
waic = 2*(-lppd + pwaic)
    # Standard error of the measure
se = 2*sqrt(n_obs*var(elpdi))
return {'waic': waic, 'se': se}
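# Added note: in the function above, lppd sums log(mean likelihood) per observation,
# pwaic sums the per-observation variance of the log-likelihood (the effective number
# of parameters), and waic = 2*(pwaic - lppd) is reported on the deviance scale.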
def get_waic_and_loo(fit) -> dict:
warn("`get_waic_and_loo` is deprecated, use `get_fit_quality` instead.",
DeprecationWarning)
return get_fit_quality(fit)
def get_fit_quality(fit) -> dict:
"""Compute Widely-Available Information Criterion (WAIC) and
Leave One Out (LOO) from a fit instance using Arviz.
Args:
fit: A PyStan4model instance (i.e. a PyStan fit).
Returns:
dict: WAIC and LOO statistics (and se's) for this fit.
"""
result = {}
try:
idata = az.from_pystan(fit, log_likelihood="llx")
except KeyError as e:
warn("'%s' not found; waic and loo will not be computed" % str(e),
stacklevel=2)
result.update({'waic': 0, 'loo': 0})
else:
result.update(dict(az.loo(idata, scale='deviance')))
result.update(dict(az.waic(idata, scale='deviance')))
result.update({'lp__rhat': get_rhat(fit)})
return result
def getllxtensor_singleroi(roi: str, data_path: str, fits_path: str,
models_path: str, model_name: str,
fit_format: int) -> np.array:
"""Recompute a single log-likelihood tensor (n_samples x n_datapoints).
Args:
roi (str): A single ROI, e.g. "US_MI" or "Greece".
data_path (str): Full path to the data directory.
fits_path (str): Full path to the fits directory.
models_path (str): Full path to the models directory.
model_name (str): The model name (without the '.stan' suffix).
fit_format (int): The .csv (0) or .pkl (1) fit format.
Returns:
np.array: The log-likelihood tensor.
"""
csv_path = Path(data_path) / ("covidtimeseries_%s_.csv" % roi)
df = pd.read_csv(csv_path)
t0 = np.where(df["new_cases"].values > 1)[0][0]
y = df[['new_cases', 'new_recover', 'new_deaths']].to_numpy()\
.astype(int)[t0:, :]
# load samples
samples = extract_samples(fits_path, models_path, model_name, roi,
fit_format)
S = np.shape(samples['lambda[0,0]'])[0]
# print(S)
# get number of observations, check against data above
for i in range(1000, 0, -1): # Search for it from latest to earliest
candidate = '%s[%d,0]' % ('lambda', i)
if candidate in samples:
N = i+1 # N observations, add 1 since index starts at 0
break # And move on
print(N) # run using old data
print(len(y))
llx = np.zeros((S, N, 3))
# # conversion from Stan neg_binom2(n_stan | mu,phi)
# to scipy.stats.nbinom(k,n_scipy,p)
# # n_scipy = phi, p = phi/mu, k = n_stan
# t0 = time.time()
for i in range(S):
phi = samples['phi'][i]
for j in range(N):
mu = max(samples['lambda['+str(j)+',0]'][i], 1)
llx[i, j, 0] = np.log(nbinom.pmf(max(y[j, 0], 0), phi, phi/mu))
mu = max(samples['lambda['+str(j)+',1]'][i], 1)
llx[i, j, 1] = np.log(nbinom.pmf(max(y[j, 1], 0), phi, phi/mu))
mu = max(samples['lambda['+str(j)+',2]'][i], 1)
llx[i, j, 2] = np.log(nbinom.pmf(max(y[j, 2], 0), phi, phi/mu))
print(np.sum(llx[i, :, :]))
print(samples['ll_'][i])
print('--')
return llx
def reweighted_stat(stat_vals: np.array, loo_vals: np.array,
loo_se_vals: np.array = None) -> float:
"""Get weighted means of a stat (across models),
where the weights are related to the LOO's of model/
Args:
stat_vals (np.array): Values (across models) of some statistic.
loo_vals (np.array): Values (across models) of LOO.
loo_se_vals (np.array, optional): Values (across models) of se of LOO.
Defaults to None.
Returns:
float: A new average value for the statistic, weighted across models.
"""
# Assume that loo is on a deviance scale (lower is better)
min_loo = min(loo_vals)
weights = np.exp(-0.5*(loo_vals-min_loo))
if loo_se_vals is not None:
weights *= np.exp(-0.5*loo_se_vals)
weights = weights/np.sum(weights)
return np.sum(stat_vals * weights)
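# Worked example (added for illustration): with loo_vals = [10, 12] on a deviance
# scale, the second model gets relative weight exp(-1), so
# reweighted_stat(np.array([1.0, 2.0]), np.array([10.0, 12.0]))
# returns (1.0 + 2.0*exp(-1)) / (1 + exp(-1)) ≈ 1.27.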
def reweighted_stats(raw_table_path: str, save: bool = True,
roi_weight='n_data_pts', extra=None, first=None, dates=None) -> pd.DataFrame:
"""Reweight all statistics (across models) according to the LOO
of each of the models.
Args:
raw_table_path (str): Path to the .csv file containing the statistics
for each model.
save (bool, optional): Whether to save the results. Defaults to True.
Returns:
pd.DataFrame: The reweighted statistics
(i.e. a weighted average across models).
"""
df = pd.read_csv(raw_table_path, index_col=['model', 'roi', 'quantile'])
df = df[~df.index.duplicated(keep='last')]
df.columns.name = 'param'
df = df.stack('param').unstack(['roi', 'quantile', 'param']).T
rois = df.index.get_level_values('roi').unique()
result = pd.Series(index=df.index)
if first is not None:
rois = rois[:first]
for roi in tqdm(rois):
loo = df.loc[(roi, 'mean', 'loo')]
loo_se = df.loc[(roi, 'std', 'loo')]
# An indexer for this ROI
chunk = df.index.get_level_values('roi') == roi
result[chunk] = df[chunk].apply(lambda x:
reweighted_stat(x, loo, loo_se),
axis=1)
result = result.unstack(['param'])
result = result[~result.index.get_level_values('quantile')
.isin(['min', 'max'])] # Remove min and max
if extra is not None:
extra.columns.name = 'param'
result = result.join(extra)
# Add stats for a fixed date
if dates:
if isinstance(dates, str):
dates = [dates]
for date in dates:
result = add_fixed_date(result, date, ['Rt', 'car', 'ifr'])
# Compute global stats
means = result.unstack('roi').loc['mean'].unstack('param')
means = means.drop('AA_Global', errors='ignore')
means = means[sorted(means.columns)]
if roi_weight == 'var':
inv_var = 1/result.unstack('roi').loc['std']**2
weights = inv_var.fillna(0).unstack('param')
global_mean = (means*weights).sum() / weights.sum()
global_var = ((weights*((means - global_mean)**2)).sum()/weights.sum())
elif roi_weight == 'waic':
waic = means['waic']
n_data = means['n_data_pts']
# Assume that waic is on a deviance scale (lower is better)
weights = np.exp(-0.5*waic/n_data)
global_mean = means.mul(weights, axis=0).sum() / weights.sum()
global_var = (((means - global_mean)**2).mul(weights, axis=0)).sum()/weights.sum()
elif roi_weight == 'n_data_pts':
n_data = means['n_data_pts']
# Assume that waic is on a deviance scale (lower is better)
weights = n_data
global_mean = means.mul(weights, axis=0).sum() / weights.sum()
global_var = (((means - global_mean)**2).mul(weights, axis=0)).sum()/weights.sum()
global_sd = global_var**(1/2)
result.loc[('AA_Global', 'mean'), :] = global_mean
result.loc[('AA_Global', 'std'), :] = global_sd
result = result.sort_index()
if save:
path = Path(raw_table_path).parent / 'fit_table_reweighted.csv'
result.to_csv(path)
return result
def days_into_2020(date_str):
date = datetime.strptime(date_str, '%Y-%m-%d')
one_one = datetime.strptime('2020-01-01', '%Y-%m-%d')
return (date - one_one).days
def get_roi_week(date_str, roi_day_one):
days = days_into_2020(date_str)
roi_days = days - roi_day_one
try:
roi_week = int(roi_days/7)
except:
roi_week = 9999
return roi_week
def add_fixed_date(df, date_str, stats):
for roi in df.index:
week = get_roi_week(date_str, df.loc[roi, 't0'])
for stat in stats:
col = '%s (week %d)' % (stat, week)
new_col = '%s (%s)' % (stat, date_str)
if col in df:
df.loc[roi, new_col] = df.loc[roi, col]
else:
df.loc[roi, new_col] = None
return df | 2.765625 | 3 |
setup.py | biobakery/docent | 0 | 12789613 | <reponame>biobakery/docent
from setuptools import setup, find_packages
setup(
name='docent',
version='0.0.1',
description=("Install python scripts into a virtualenv, "
"then expose them outside of the virtualenv"),
zip_safe=False,
classifiers=[
"Development Status :: 1 - Pre-Alpha"
],
packages=['docent'],
entry_points={
'console_scripts': [
'docent = docent:main'
]
}
)
| 1.398438 | 1 |
Sampling/api.py | tpsatish95/Youtube-Comedy-Comparison | 5 | 12789614 | import urllib.request
import urllib
import re
import time
import xml
from xml.dom import minidom
import csv
f = open('comedy_comparisons.testtamp')
csv_f = csv.reader(f)
f1= open("funny.txt","w")
f2= open("notfunny.txt","w")
f1d= open("funnyd.txt","w")
f2d= open("notfunnyd.txt","w")
for row in csv_f:
j=0
for i in range(0,2):
u = "https://gdata.youtube.com/feeds/api/videos/"+row[i].strip()+"?v=2"
print (u)
a=0
while(a<3):
try:
url = urllib.request.urlretrieve(u)
print(url)
if url[0] == "":
                    a = a + 1  # count this attempt so an empty result cannot retry forever
                    continue
doc = minidom.parse(url[0])
sentence = doc.getElementsByTagName("title")[0].firstChild.nodeValue
description = doc.getElementsByTagName("media:description")[0].firstChild.nodeValue
print(sentence+" "+description)
if row[2]=="left" and i==0:
f1.write(sentence+"\n------------\n")
f1d.write(description+"\n------------\n")
if row[2]=="left" and i==1:
f2.write(sentence+"\n------------\n")
f2d.write(description+"\n------------\n")
if row[2]=="right" and i==0:
f2.write(sentence+"\n------------\n")
f2d.write(description+"\n------------\n")
if row[2]=="right" and i==1:
f1.write(sentence+"\n------------\n")
f1d.write(description+"\n------------\n")
print("SS")
break
            except Exception:
print ('retrying')
a=a+1
f1.flush()
f1.close()
f2.flush()
f2.close()
f1d.flush()
f1d.close()
f2d.flush()
f2d.close()
f.close()
print("Success!!")
| 2.8125 | 3 |
visualize_alpha_evolution.py | sjoshi804/neural-architecture-search-project | 2 | 12789615 | # External Imports
from copy import deepcopy
from os import write
import sys
# Internal Imports
from util import create_alpha_history_object, load_alpha,print_alpha, update_alpha_history, write_alpha_history_to_csvs
alpha_dir_path = sys.argv[1]
num_epochs = int(sys.argv[2])
# Load and print best alpha
best_alpha_normal, best_alpha_reduce = load_alpha(alpha_dir_path=alpha_dir_path)
print("Best Alpha Normal")
print_alpha(best_alpha_normal)
print("Best Alpha Reduce")
print_alpha(best_alpha_reduce)
# Construct Alpha History Object
alpha_normal_history = create_alpha_history_object(best_alpha_normal)
alpha_reduce_history = deepcopy(alpha_normal_history)
for epoch in range(num_epochs):
alpha_normal, alpha_reduce = load_alpha(alpha_dir_path=alpha_dir_path, epoch=epoch)
update_alpha_history(alpha_history=alpha_normal_history, alpha=alpha_normal)
update_alpha_history(alpha_history=alpha_reduce_history, alpha=alpha_reduce)
# Write to csv
write_alpha_history_path=alpha_dir_path.replace('checkpoints_search','alpha_history')
write_alpha_history_to_csvs(alpha_history=alpha_normal_history, alpha=alpha_normal, alpha_type="normal", write_dir=write_alpha_history_path)
write_alpha_history_to_csvs(alpha_history=alpha_reduce_history, alpha=alpha_reduce, alpha_type="reduce", write_dir=write_alpha_history_path)
| 2.625 | 3 |
qcdb/tests/nwchem_tests/test_hess_h2o.py | loriab/qccddb | 8 | 12789616 | import os
import sys
import numpy as np
import qcdb
from ..utils import *
@using("nwchem")
def test_grad():
h2o = qcdb.set_molecule(
"""
O 0.00000000 0.00000000 0.00000000
H 0.00000000 1.93042809 -1.10715266
H 0.00000000 -1.93042809 -1.10715266
units au"""
)
qcdb.set_options(
{
"basis": "sto-3g",
"scf__e_convergence": 1e-6,
#'nwchem_driver__tight': True
}
)
val = qcdb.gradient("nwc-scf")
scf = -74.888142460799
grads = np.array(
[[0.000000, 0.000000, 0.058550], [0.000000, 0.140065, -0.029275], [0.000000, -0.140065, -0.029275]]
)
assert compare_values(scf, qcdb.variable("HF TOTAL ENERGY"), 5, "scf")
assert compare_arrays(grads, qcdb.variable("CURRENT GRADIENT"), 5, "scf grad")
| 2.15625 | 2 |
mixly_arduino/sample/mixpy/海龟画图/py/海龟画图04盛开的向日葵_01太阳公公.py | wecake/Mixly_Arduino | 118 | 12789617 | import turtle
tina= turtle.Turtle()
tina.pencolor("#ffcc33")
tina.fillcolor("#ffcc33")
tina.pensize(5)
tina.begin_fill()
tina.circle (80,360)
tina.end_fill()
tina.penup()
tina.goto(-40,100)
tina.pendown()
tina.pencolor("#000000")
tina.setheading(30)
tina.circle ((-30),60)
tina.penup()
tina.goto(20,100)
tina.pendown()
tina.setheading(30)
tina.circle ((-30),60)
tina.penup()
tina.goto(-20,60)
tina.pendown()
tina.setheading(-30)
tina.circle (50,60)
tina.penup()
tina.goto(-30,-30)
tina.pendown()
tina.pencolor("#ffcc33")
tina.setheading(60)
for i in range(0, 12, 1):
tina.circle ((-35),120)
tina.left(150)
tina.hideturtle()
| 3.5625 | 4 |
config/config.py | Morgan-Gan/2D-3D-SLowFast-TSM--Proj | 0 | 12789618 | import ast
from typing import Tuple, List
from roi.pooler_ import Pooler
class Config(object):
ANCHOR_RATIOS = [(1, 2), (1, 1), (2, 1)]
#ANCHOR_SIZES = [128, 256, 512]
ANCHOR_SIZES = [64, 128]
POOLER_MODE = Pooler.Mode.POOLING
BACKBONE_NAME='tsm' #slowfastnet50,tsm
DETECTOR_RESULT_PATH = '/home/gan/home/ganhaiyang/output/ava/ava_train_removebadlist_v2.2.csv'
@classmethod
def describe(cls):
text = '\nConfig:\n'
attrs = [attr for attr in dir(cls) if not callable(getattr(cls, attr)) and not attr.startswith('__')]
text += '\n'.join(['\t{:s} = {:s}'.format(attr, str(getattr(cls, attr))) for attr in attrs]) + '\n'
return text
@classmethod
def setup(cls, image_min_side: float = None, image_max_side: float = None,
anchor_ratios: List[Tuple[int, int]] = None, anchor_sizes: List[int] = None, pooler_mode: str = None):
if image_min_side is not None:
cls.IMAGE_MIN_SIDE = image_min_side
if image_max_side is not None:
cls.IMAGE_MAX_SIDE = image_max_side
if anchor_ratios is not None:
cls.ANCHOR_RATIOS = ast.literal_eval(anchor_ratios)
if anchor_sizes is not None:
cls.ANCHOR_SIZES = ast.literal_eval(anchor_sizes)
if pooler_mode is not None:
cls.POOLER_MODE = Pooler.Mode(pooler_mode)
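# --- Hypothetical usage sketch (values are illustrative, not project defaults) ---
if __name__ == '__main__':
    Config.setup(anchor_sizes='[32, 64, 128]', anchor_ratios='[(1, 2), (1, 1)]')
    print(Config.describe())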
| 1.953125 | 2 |
contrib/example_plugins/mycomp/src/mycomp/mycomp.py | mjfwest/OpenMDAO-Framework | 69 | 12789619 | from openmdao.main.api import Component
from openmdao.main.datatypes.api import Float
# Make sure that your class has some kind of docstring. Otherwise
# the descriptions for your variables won't show up in the
# source documentation.
class MyComponent(Component):
"""An example Component plugin class. """
x = Float(0.0, iotype='in', desc='some input')
y = Float(0.0, iotype='out', desc='x + 1')
def execute(self):
"""y = x + 1"""
self.y = self.x + 1.0
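# --- Hypothetical usage sketch (assumes the legacy OpenMDAO run() entry point) ---
if __name__ == '__main__':
    comp = MyComponent()
    comp.x = 3.0
    comp.run()      # run() invokes execute() on this single component
    print(comp.y)   # expected output: 4.0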
| 2.390625 | 2 |
ifthen/statements/thens/0001.py | tinyx/yitao.io | 0 | 12789620 | def execute(operating_player, opponent_player):
operating_player.attack = operating_player.attack + 5
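# --- Hypothetical usage sketch: any objects exposing an `attack` attribute work here ---
if __name__ == '__main__':
    class _StubPlayer:
        def __init__(self, attack):
            self.attack = attack
    attacker, defender = _StubPlayer(10), _StubPlayer(7)
    execute(attacker, defender)
    print(attacker.attack)  # 15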
| 1.726563 | 2 |
release/stubs.min/Wms/RemotingImplementation/Batches/__init__.py | tranconbv/ironpython-stubs | 0 | 12789621 | # encoding: utf-8
# module Wms.RemotingImplementation.Batches calls itself Batches
# from Wms.RemotingImplementation,Version=1.23.1.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no important
from System.Collections.Generic import *
from ..__init__ import *
# no functions
# classes
class BatchesExtensions(object):
# no doc
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BatchesExtensions()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def ExtractFastLookupDictionaries(this,batchNames,orderNumbers,lineNumbers,barcodes):
""" ExtractFastLookupDictionaries(this: Batches) -> (Dictionary[str,str],Dictionary[str,str],Dictionary[str,str],Dictionary[str,str]) """
pass
@staticmethod
def GetUnpackedItemsOfCustomer(batches,customer):
""" GetUnpackedItemsOfCustomer(batches: IEnumerable[Batch],customer: PackCustomer) -> IEnumerable[ValueTuple[ItemPackLocation,OutboundOrderLine]] """
pass
__all__=[
'ExtractFastLookupDictionaries',
'GetUnpackedItemsOfCustomer',
]
class BatchManager(object):
""" BatchManager() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BatchManager()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def CalculateAge(createdAt):
""" CalculateAge(createdAt: DateTime) -> str """
pass
@staticmethod
def CreateBatchBarcode():
""" CreateBatchBarcode() -> str """
pass
@staticmethod
def CreateCustomerNumbers(batch):
""" CreateCustomerNumbers(batch: Batch) """
pass
@staticmethod
def DeleteBatchById(id):
""" DeleteBatchById(id: str) """
pass
@staticmethod
def DeleteBatchIfNothingChanged(batchCacheKey):
""" DeleteBatchIfNothingChanged(batchCacheKey: CacheKey) """
pass
@staticmethod
def GetAllBatches():
""" GetAllBatches() -> Batches """
pass
@staticmethod
def GetApprovedBatches():
""" GetApprovedBatches() -> Batches """
pass
@staticmethod
def GetBatchByCacheKey(cacheKey):
""" GetBatchByCacheKey(cacheKey: CacheKey) -> Batch """
pass
@staticmethod
def GetBatchById(id,cacheKey):
""" GetBatchById(id: str) -> (Batch,CacheKey) """
pass
@staticmethod
def GetBatchesByIds(batchIds):
""" GetBatchesByIds(batchIds: List[str]) -> Batches """
pass
@staticmethod
def GetBatchesIncomplete():
""" GetBatchesIncomplete() -> Batches """
pass
@staticmethod
def GetBatchesIncompleteSimple():
""" GetBatchesIncompleteSimple() -> BatchBaseList """
pass
@staticmethod
def GetBatchNames(allocatedStockItemReferences,batchNames):
""" GetBatchNames(allocatedStockItemReferences: List[AllocatedStockItemReference]) -> List[str] """
pass
@staticmethod
def GetCompletedBatches():
""" GetCompletedBatches() -> Batches """
pass
@staticmethod
def SaveBatch(batch):
""" SaveBatch(batch: Batch) -> Batch """
pass
SyncLock=None
class BatchPackManager(object):
""" BatchPackManager(stockManager: IStockManager,transportPackages: TransportPackages) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BatchPackManager()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def AddCodAmountToFirstPackage(self,result):
""" AddCodAmountToFirstPackage(self: BatchPackManager,result: ErpProcessSalesOrderLinesResult) """
pass
def AddHistoryIdsToPackage(self,lines):
""" AddHistoryIdsToPackage(self: BatchPackManager,lines: OutboundOrderLines) """
pass
def CheckQuantityToMoveWithProcessedItems(self,dfObject):
""" CheckQuantityToMoveWithProcessedItems(self: BatchPackManager,dfObject: DataFlowObject[MoveTransportItemsBetweenTransportPackagesArgs]) -> DataFlowObject[MoveTransportItemsBetweenTransportPackagesArgs] """
pass
@staticmethod
def CleanUpPackages(batch):
""" CleanUpPackages(batch: Batch) """
pass
@staticmethod
def CleanUpTransportPackages():
""" CleanUpTransportPackages() """
pass
def ClearProcessedQuantities(self):
""" ClearProcessedQuantities(self: BatchPackManager) """
pass
@staticmethod
def CreateTransportPackages(dfObject,packagesKey):
""" CreateTransportPackages(dfObject: DataFlowObject[GetItemsToPackArgs]) -> (DataFlowObject[GetItemsToPackArgs],CacheKey) """
pass
def Dispose(self):
""" Dispose(self: BatchPackManager) """
pass
def GetBatchesByOrderNumber(self,orderNumber):
""" GetBatchesByOrderNumber(self: BatchPackManager,orderNumber: str) -> IEnumerable[Batch] """
pass
@staticmethod
def GetCustomersPending(args,customers):
""" GetCustomersPending(args: GetCustomersWithPendingPackagesArgs) -> (int,PackCustomers) """
pass
@staticmethod
def GetDeliveryDateTime():
""" GetDeliveryDateTime() -> DateTime """
pass
def GetGroupedFulfillableOrderLines(self,mergeSoLines,mergePackLocations,subtractFulfilledQuantities):
""" GetGroupedFulfillableOrderLines(self: BatchPackManager,mergeSoLines: bool,mergePackLocations: bool,subtractFulfilledQuantities: bool) -> Dictionary[OutboundOrder,OutboundOrderLines] """
pass
def GetOutboundOrderLinesForFulFillment(self,outboundOrderLines):
""" GetOutboundOrderLinesForFulFillment(self: BatchPackManager,outboundOrderLines: IEnumerable[OutboundOrderLine]) -> OutboundOrderLines """
pass
def GetOutboundOrderLinesForFulFillmentGrouped(self,outboundOrderLines,mergePackLocations):
""" GetOutboundOrderLinesForFulFillmentGrouped(self: BatchPackManager,outboundOrderLines: IEnumerable[OutboundOrderLine],mergePackLocations: bool) -> OutboundOrderLines """
pass
@staticmethod
def GetPickupDateTime():
""" GetPickupDateTime() -> DateTime """
pass
def IsContainerBox(self,boxGuid):
""" IsContainerBox(self: BatchPackManager,boxGuid: Guid) -> bool """
pass
def IsEverythingPacked(self):
""" IsEverythingPacked(self: BatchPackManager) -> bool """
pass
def IsSomethingPacked(self):
""" IsSomethingPacked(self: BatchPackManager) -> bool """
pass
def MoveItems(self,fromBoxGuid,toBoxGuid,items,orderNumber,warning):
""" MoveItems(self: BatchPackManager,fromBoxGuid: Guid,toBoxGuid: Guid,items: TransportItems,orderNumber: str) -> (bool,str) """
pass
def RemoveOrdersComplete(self):
""" RemoveOrdersComplete(self: BatchPackManager) """
pass
def RemoveOrdersIncomplete(self):
""" RemoveOrdersIncomplete(self: BatchPackManager) """
pass
def RemoveProcessedOutboundOrderLines(self,order,adjustStock):
""" RemoveProcessedOutboundOrderLines(self: BatchPackManager,order: OutboundOrder,adjustStock: bool) """
pass
def SetPackedUserAndTime(self,orderNumbers):
""" SetPackedUserAndTime(self: BatchPackManager,orderNumbers: IEnumerable[str]) """
pass
def SetQuantityProcessedOnTransportItems(self,lines):
""" SetQuantityProcessedOnTransportItems(self: BatchPackManager,lines: OutboundOrderLines) """
pass
def ValidateStockBeforeProcessPacking(self,dfObject):
""" ValidateStockBeforeProcessPacking(self: BatchPackManager,dfObject: DataFlowObject[ProcessBatchPackingArgs]) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,stockManager,transportPackages):
""" __new__(cls: type,stockManager: IStockManager,transportPackages: TransportPackages) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
class BatchPickManager(object):
""" BatchPickManager(batch: Batch,stock: IStockManager,settings: AllocationSettings) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BatchPickManager()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def CheckBatchScanForItem(self,args):
""" CheckBatchScanForItem(self: BatchPickManager,args: BatchScanArgs) -> BatchScanResult """
pass
def CheckBatchScanForLocation(self,barcode,warehouseCode,expectedLocationCode):
""" CheckBatchScanForLocation(self: BatchPickManager,barcode: str,warehouseCode: str,expectedLocationCode: str) -> BatchScanResult """
pass
@staticmethod
def CreateImagesForColliLetters(customers):
""" CreateImagesForColliLetters(customers: PackCustomers) """
pass
def DetermineRoute(self,locations=None):
""" DetermineRoute(self: BatchPickManager)DetermineRoute(locations: BatchPickLocations) -> BatchPickLocations """
pass
def Dispose(self):
""" Dispose(self: BatchPickManager) """
pass
def GetItemIdsAvailable(self,args):
""" GetItemIdsAvailable(self: BatchPickManager,args: GetItemIdentificationArgs) -> ItemIdentifications """
pass
def GetItemsToPickOnLocation(self,warehouseCode,warehouseLocationCode):
""" GetItemsToPickOnLocation(self: BatchPickManager,warehouseCode: str,warehouseLocationCode: str) -> BatchPickLocations """
pass
def GetLocationsPerOrder(self,args,putback):
""" GetLocationsPerOrder(self: BatchPickManager,args: PickArgs,putback: bool) -> List[ValueTuple[str,ItemPickLocation]] """
pass
def MarkPickLocationAsPicked(self,idOfBatchPickLocation):
""" MarkPickLocationAsPicked(self: BatchPickManager,idOfBatchPickLocation: str) -> BatchPickLocation """
pass
def Pick(self,dfObject):
""" Pick(self: BatchPickManager,dfObject: DataFlowObject[PickArgs]) -> DataFlowObject[PickArgs] """
pass
def PickItemId(self,dfObject):
""" PickItemId(self: BatchPickManager,dfObject: DataFlowObject[PickArgs]) -> DataFlowObject[PickArgs] """
pass
def PickItemIdRange(self,item,dfObject):
""" PickItemIdRange(self: BatchPickManager,item: Item,dfObject: DataFlowObject[PickItemIdRangeArgs]) -> DataFlowObject[PickItemIdRangeArgs] """
pass
def PickMultipleItemIds(self,item,itemIds,dfObject):
""" PickMultipleItemIds(self: BatchPickManager,item: Item,itemIds: List[str],dfObject: DataFlowObject[PickItemIdsArgs]) -> DataFlowObject[PickItemIdsArgs] """
pass
def PrepareWarehouseTransferForOrderLines(self,transferDescription,orderLines,recordId,getDestinationLocationForLineDelegate):
""" PrepareWarehouseTransferForOrderLines(self: BatchPickManager,transferDescription: str,orderLines: IEnumerable[OutboundOrderLine],recordId: int,getDestinationLocationForLineDelegate: OnGetDestinationLocationForLine) -> WarehouseTransfer """
pass
def PutBack(self,dfObject):
""" PutBack(self: BatchPickManager,dfObject: DataFlowObject[PickArgs]) -> DataFlowObject[PickArgs] """
pass
def ReallocatePickedOutboundOrderLine(self,containerBoxGuid,orderLine,pickLoc,warehouseCodeTo,warehouseLocationCodeTo,outerReference,innerReference):
""" ReallocatePickedOutboundOrderLine(self: BatchPickManager,containerBoxGuid: Guid,orderLine: OutboundOrderLine,pickLoc: ItemPickLocation,warehouseCodeTo: str,warehouseLocationCodeTo: str,outerReference: str,innerReference: str) """
pass
def ReallocatePickedSalesOrderLines(self,batch,warehouseLocationCodeTo):
""" ReallocatePickedSalesOrderLines(self: BatchPickManager,batch: Batch,warehouseLocationCodeTo: str) """
pass
@staticmethod
def RecreateBatchPickLocations(batch=None):
"""
RecreateBatchPickLocations(batch: Batch) -> BatchPickLocations
RecreateBatchPickLocations(self: BatchPickManager)
"""
pass
def RemovePickedLocations(self):
""" RemovePickedLocations(self: BatchPickManager) """
pass
def SetPickProcessed(self):
""" SetPickProcessed(self: BatchPickManager) """
pass
def UpdateColliReference(self,*__args):
""" UpdateColliReference(self: BatchPickManager,typeToUpdate: ReferenceType,args: PickArgs,orderNumber: str,quantity: Decimal,putBack: bool)UpdateColliReference(self: BatchPickManager,args: PickArgs,putBack: bool) """
pass
def UpdateColloReferences(self,args,putBack):
""" UpdateColloReferences(self: BatchPickManager,args: PickArgs,putBack: bool) """
pass
def ValidateBatchedItem(self,selectedBatchPickLocation,itemCode):
""" ValidateBatchedItem(self: BatchPickManager,selectedBatchPickLocation: BatchPickLocation,itemCode: str) -> DataFlowObject[CacheKey] """
pass
def ValidateBatchPickLocation(self,selectedBatchPickLocation,warehouseLocationCode):
""" ValidateBatchPickLocation(self: BatchPickManager,selectedBatchPickLocation: BatchPickLocation,warehouseLocationCode: str) -> DataFlowObject[CacheKey] """
pass
def ValidateIfAllPicked(self):
""" ValidateIfAllPicked(self: BatchPickManager) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,batch,stock,settings):
""" __new__(cls: type,batch: Batch,stock: IStockManager,settings: AllocationSettings) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
BoxColors=None
class ColliRegistrationResult(object):
""" ColliRegistrationResult() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ColliRegistrationResult()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def AddCount(self,count):
""" AddCount(self: ColliRegistrationResult,count: Count) """
pass
def AddOrderLineId(self,batchId,lineId):
""" AddOrderLineId(self: ColliRegistrationResult,batchId: Guid,lineId: int) """
pass
AreColliHandled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: AreColliHandled(self: ColliRegistrationResult) -> bool
Set: AreColliHandled(self: ColliRegistrationResult)=value
"""
Counts=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Counts(self: ColliRegistrationResult) -> Dictionary[int,CountForColliRegistration]
"""
OrderLineIds=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: OrderLineIds(self: ColliRegistrationResult) -> Dictionary[Guid,List[int]]
"""
class ColliRegistrator(object):
""" ColliRegistrator(transportPackages: TransportPackages) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ColliRegistrator()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def Dispose(self):
""" Dispose(self: ColliRegistrator) """
pass
def HandleColliForStockRegistration(self):
""" HandleColliForStockRegistration(self: ColliRegistrator) -> ColliRegistrationResult """
pass
@staticmethod
def UndoColliForStockRegistration(transportPackages,result):
""" UndoColliForStockRegistration(transportPackages: TransportPackages,result: ColliRegistrationResult) """
pass
def ValidateTransportPackage(self,package):
""" ValidateTransportPackage(self: ColliRegistrator,package: TransportPackage) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,transportPackages):
""" __new__(cls: type,transportPackages: TransportPackages) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
class CountForColliRegistration(object):
""" CountForColliRegistration() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return CountForColliRegistration()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
ItemCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ItemCode(self: CountForColliRegistration) -> str
Set: ItemCode(self: CountForColliRegistration)=value
"""
ItemId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ItemId(self: CountForColliRegistration) -> ItemIdentification
Set: ItemId(self: CountForColliRegistration)=value
"""
Quantity=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Quantity(self: CountForColliRegistration) -> Decimal
Set: Quantity(self: CountForColliRegistration)=value
"""
WarehouseCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: WarehouseCode(self: CountForColliRegistration) -> str
Set: WarehouseCode(self: CountForColliRegistration)=value
"""
WarehouseLocationCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: WarehouseLocationCode(self: CountForColliRegistration) -> str
Set: WarehouseLocationCode(self: CountForColliRegistration)=value
"""
class PickingList(object):
""" PickingList() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return PickingList()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def Create(self,batch):
""" Create(self: PickingList,batch: Batch) -> BatchPickLocations """
pass
def Dispose(self):
""" Dispose(self: PickingList) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
# variables with complex values
| 2.046875 | 2 |
rulr/Nodes/Item/Camera.py | elliotwoods/Rulr-2.0 | 6 | 12789622 | import rulr.Nodes
import rulr.Components.RigidBody
import rulr.Components.View
class Node(rulr.Nodes.Base):
def __init__(self):
super().__init__()
self.components.rigidBody = rulr.Components.RigidBody.Component()
self.components.view = rulr.Components.View.Component(self.components.rigidBody) | 2.140625 | 2 |
server/commands/__init__.py | griseduardo/Facial-Recognition-Database-Management-System | 6 | 12789623 | from .build import cli as build_cli
from .data import cli as data_cli
from .dev import cli as dev_cli
| 1.148438 | 1 |
run.py | yamamo-i/attendance-slack | 0 | 12789624 | from slackbot.bot import Bot
import logging
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
# TODO: configのクラスを作って最初にコールできるようにする
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.INFO)
logging.info("starting....")
main()
| 2.5 | 2 |
test/test.py | is9117/mp | 0 | 12789625 | import sys
import time
import logging
sys.path.append(".")
import mp
logging.basicConfig(format="%(asctime)s %(levelname)s [%(process)d:%(module)s:%(funcName)s] %(message)s", level=logging.DEBUG)
class Task:
def task(self, *args):
logging.info(args)
mp.start()
worker = mp.new_worker("test", Task)
for i in range(100):
worker.enqueue("test output", str(i))
time.sleep(0.3)
mp.stop()
| 2.234375 | 2 |
PokerRL/cfr/VRCFR.py | MAWUT0R/PokerRL | 1 | 12789626 | # Copyright (c) 2019 <NAME>
import numpy as np
import sys
from PokerRL.cfr._MCCFRBase import MCCFRBase as _VRCFRBase
class VRCFR(_VRCFRBase):
def __init__(self,
name,
chief_handle,
game_cls,
agent_bet_set,
starting_stack_sizes=None,
innerloop_epi=None):
super().__init__(name=name,
chief_handle=chief_handle,
game_cls=game_cls,
starting_stack_sizes=starting_stack_sizes,
agent_bet_set=agent_bet_set,
algo_name="VRCFR",
innerloop_epi=innerloop_epi,
sample_method='vr'
)
# print("stack size", self._starting_stack_sizes)
self.reset()
def _regret_formula_after_first_it(self, ev_all_actions, strat_ev, last_regrets):
return ev_all_actions - strat_ev + last_regrets
def _regret_formula_first_it(self, ev_all_actions, strat_ev):
return ev_all_actions - strat_ev
def _compute_new_strategy(self, p_id):
for t_idx in range(len(self._trees)):
def _fill(_node):
if _node.p_id_acting_next == p_id:
N = len(_node.children)
_capped_reg = np.maximum(_node.data["regret"], 0)
_reg_pos_sum = np.expand_dims(np.sum(_capped_reg, axis=1), axis=1).repeat(N, axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
_node.strategy = np.where(
_reg_pos_sum > 0.0,
_capped_reg / _reg_pos_sum,
np.full(shape=(self._env_bldrs[t_idx].rules.RANGE_SIZE, N,), fill_value=1.0 / N,
dtype=np.float32)
)
for c in _node.children:
_fill(c)
_fill(self._trees[t_idx].root)
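            # The update above is standard regret matching: for each private hand,
            #   sigma(a) = max(R(a), 0) / sum_b max(R(b), 0)   if the positive-regret sum > 0,
            #   sigma(a) = 1 / |A|                             otherwise (uniform over actions).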
def _update_V_and_M(self, p_id):
for t_idx in range(len(self._trees)):
def _fill(_node):
if self.iteration == 4:
print("pid", p_id)
_node.stg_diff = np.zeros(self._env_bldrs[t_idx].rules.RANGE_SIZE)
_node.V_value[p_id] = 0.0#np.zeros(self._env_bldrs[t_idx].rules.RANGE_SIZE)
_node.M_value[p_id] = 0.0#np.zeros(self._env_bldrs[t_idx].rules.RANGE_SIZE)
if self.iteration == 4:
print("V", _node.V_value)
print("M", _node.M_value)
sys.exit()
if _node.p_id_acting_next == p_id:
for i in range(self._env_bldrs[t_idx].rules.RANGE_SIZE):
for a in range(len(_node.strategy[i])):
current_m = abs(_node.strategy[i,a] - _node.ref_strategy[i, a])
_node.M_value[p_id, i] = max(_node.M_value[p_id, i], current_m)
_node.stg_diff[i] += current_m
for c in _node.children:
_fill(c)
child_V = c.V_value[p_id]
if _node.p_id_acting_next == p_id:
a_idx = _node.allowed_actions.index(c.action)
child_V += c.stg_diff
child_V *= _node.strategy[:, a_idx]
_node.V_value[p_id] = np.maximum(_node.V_value[p_id], child_V)
_node.M_value[p_id] = np.maximum(_node.M_value[p_id], c.M_value[p_id])
_fill(self._trees[t_idx].root)
def _compute_vr_cfv(self, p_id):
# Compute node.ev_weighted, node.ev_br_weighted, node.epsilon, node.exploitability
for t_idx in range(len(self._trees)):
self._trees[t_idx].compute_vr_ev(p_id)
def _add_strategy_to_average(self, p_id):
def _fill(_node):
if _node.p_id_acting_next == p_id:
contrib = _node.strategy * np.expand_dims(_node.reach_probs[p_id], axis=1)
if self._iter_counter > 0:
_node.data["avg_strat_sum"] += contrib
else:
_node.data["avg_strat_sum"] = contrib
_s = np.expand_dims(np.sum(_node.data["avg_strat_sum"], axis=1), axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
_node.data["avg_strat"] = np.where(_s == 0,
np.full(shape=len(_node.allowed_actions),
fill_value=1.0 / len(_node.allowed_actions)),
_node.data["avg_strat_sum"] / _s
)
assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
for c in _node.children:
_fill(c)
for t_idx in range(len(self._trees)):
_fill(self._trees[t_idx].root)
def _add_strategy_to_inner_average(self, p_id):
def _inner_fill(_node, weight):
for i in range(self._trees[t_idx]._env_bldr.rules.RANGE_SIZE):
if _node.reach_probs[p_id, i] == 0:
weight[i] = 0.0
if _node.p_id_acting_next == p_id:
contrib = _node.strategy * np.expand_dims(weight, axis=1)
if "inner_avg_strat_sum" not in _node.data:
raise ValueError
_node.data["inner_avg_strat_sum"] += contrib
_s = np.expand_dims(np.sum(_node.data["inner_avg_strat_sum"], axis=1), axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
_node.data["inner_avg_strat"] = np.where(_s == 0,
np.full(shape=len(_node.allowed_actions),
fill_value=1.0 / len(_node.allowed_actions)),
_node.data["inner_avg_strat_sum"] / _s
)
assert np.allclose(np.sum(_node.data["inner_avg_strat"], axis=1), 1, atol=0.0001)
for c in _node.children:
if _node.p_id_acting_next == p_id:
a_idx = _node.allowed_actions.index(c.action)
new_weight = weight * _node.strategy[:, a_idx]
else:
new_weight = np.copy(weight)
_inner_fill(c, new_weight)
for t_idx in range(len(self._trees)):
_inner_fill(self._trees[t_idx].root, np.ones(self._trees[t_idx]._env_bldr.rules.RANGE_SIZE))
def _update_outer_stgy(self):
for t_idx in range(len(self._trees)):
self._trees[t_idx].update_outer_stgy()
def _update_refer_info(self):
for t_idx in range(len(self._trees)):
self._trees[t_idx].update_refer_info()
def iteration(self):
cur_nodes = self.touching_nodes
for p in range(self._n_seats):
self._update_reach_probs()
self._compute_cfv()
self._compute_regrets(p_id=p)
self._compute_new_strategy(p_id=p)
self._update_reach_probs()
self._update_refer_info()
for p in range(self._n_seats):
self._add_strategy_to_inner_average(p_id=p)
print("outer nodes", self.touching_nodes-cur_nodes)
cur_nodes = self.touching_nodes
for i in range(self._innerloop_epi):
print("inner epi", i)
for p in range(self._n_seats):
self._generate_samples(p_id=p, opponent=True)
self._compute_vr_cfv(p_id=p)
self._compute_regrets(p_id=p)
# self._update_reach_probs()
# variance = self._calcultate_variance()
# print("variance",variance)
self._compute_new_strategy(p_id=p)
self._update_V_and_M(p_id=p)
self._add_strategy_to_inner_average(p_id=p)
print("inner nodes", self.touching_nodes-cur_nodes)
cur_nodes = self.touching_nodes
self._update_outer_stgy()
self._update_reach_probs()
for p in range(self._n_seats):
self._add_strategy_to_average(p_id=p)
self._iter_counter += 1
expl = self._evaluate_avg_strats()
return expl
# print("tree visited", self._trees[0].root.visited)
| 2 | 2 |
config.py | SViN24/smlinux | 1 | 12789627 | # for file mangement and exiting
import os
import sys
# Qt imports
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGridLayout
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QCheckBox
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
# misc
from math import floor
'''
This is the class used for the setting and the corresponding QWidget.
You shouldn't have to edit this class.
Arguments:
'settingName' is the name of the setting as it appears in the menu
'settingType' is "check" for a checkbox, "line" for text entry,
or "dropdown" for a dropdown box.
Optional arguments:
'tooltip' lets you set the text you see when you hover over the setting
'dropdownOptions' is a list containing options for the dropdown box
'''
class ConfigSetting(QWidget):
def __init__(self, settingName, settingType, **kwargs):
super().__init__()
self.settingType = settingType
mainLayout = QHBoxLayout()
self.setLayout(mainLayout)
mainLayout.addWidget(QLabel(settingName))
if settingType == "check":
self.settingInput = QCheckBox()
self.settingInput.setChecked(True)
elif settingType == "line":
self.settingInput = QLineEdit()
elif settingType == "dropdown":
self.settingInput = QComboBox()
if kwargs.get("dropdownOptions", None):
for i in kwargs["dropdownOptions"]:
self.settingInput.addItem(i)
else:
print("No options were provided for setting \'" + settingName + "\'")
mainLayout.addWidget(self.settingInput)
if kwargs.get("tooltip", None):
self.setToolTip(kwargs["tooltip"])
def setSetting(self, setting):
if self.settingType == "check":
if setting == "1":
self.settingInput.setChecked(True)
elif setting == "0":
self.settingInput.setChecked(False)
elif self.settingType == "line":
self.settingInput.setText(setting)
elif self.settingType == "dropdown":
self.settingInput.setCurrentIndex(self.settingInput.findText(setting))
def getSetting(self):
if self.settingType == "check":
if self.settingInput.isChecked():
return "1"
else:
return "0"
elif self.settingType == "line":
return self.settingInput.text()
elif self.settingType == "dropdown":
return self.settingInput.currentText()
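# Standalone example (hypothetical; Qt widgets need a running QApplication,
# which ConfigWindow below normally provides):
#   setting = ConfigSetting("DEBUG", "check", tooltip="Example only")
#   setting.setSetting("1")
#   setting.getSetting()  # -> "1"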
# This is the class for the configuration window
class ConfigWindow(QMainWindow):
def __init__(self):
super().__init__()
# Sets the title
self.setWindowTitle("smlinux configuration editor")
mainLayout = QVBoxLayout()
container = QWidget()
container.setLayout(mainLayout)
self.setCentralWidget(container)
actualSettingsLayout = QGridLayout()
actualSettingsContainer = QWidget()
actualSettingsContainer.setLayout(actualSettingsLayout)
mainLayout.addWidget(actualSettingsContainer)
# This is the dictionary holding the objects for the different settings.
# The values are the objects while the keys are the setting names
# as seen in the config file. This is what you edit to add more settings.
# Check the comment for "ConfigSetting()" to see the arguments.
self.configDict = {
"PRESET": ConfigSetting("Preset", "dropdown", dropdownOptions = [ "UserDefined", "sm64pc", "sm64-port", "sm64-portweb", "sm64dos", "sm64nx", "sm64ex", "sm64exweb", "sm64ex-coop", "androidex", "cheaterex", "render96ex","r96proto","r96master", "r96tester", "sm64-port-android-base"],tooltip="Unless Userdefined, overrides settings including GIT and BRANCH (see FAQ)."),
"VERSION": ConfigSetting("ROM VERSION", "dropdown", tooltip = "Must correspond to ROM region", dropdownOptions = ["us","jp","eu"]),
"RENDER_API": ConfigSetting("RENDER_API", "dropdown", tooltip = "Linux and macOS support GL (OpenGL 2.1+) or GL_LEGACY (OpenGL 1.1+).\nD3D11 (DirectX 11) is also offered for Windows.\n(sm64ex-based repos only)", dropdownOptions = ["GL","GL_LEGACY","D3D11"]),
"SDL_API": ConfigSetting("SDL API", "dropdown", tooltip = "CONTROLLER_API AUDIO_API Support SDL2 or SDL 1.2 (sm64ex-based repos only)", dropdownOptions = ["SDL2","SDL1"]),
"MAXJOBS": ConfigSetting("Maximum Jobs", "dropdown", dropdownOptions = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "12", "15", "" ], tooltip = "Maximum cpu threads used during compile.\nUndefined (blank) will try to use all."),
"BASEPATH": ConfigSetting("Base Folder", "line", tooltip = "Must be valid existing path.\nFolders for each repo cloned will be placed there."),
"InstallHD": ConfigSetting("Install HD Add-ons", "check", tooltip = "Install HD Add-ons when applicable including\nHD Mario & Bowser Models, HD intro (goddard) and 3D Coins ,\nUpscaled Textures & High Quality Sounds - see FAQ"),
"MODELPACK": ConfigSetting("Install R96 MODELPACK", "check", tooltip = "Install Render 96 SGI Model Pack\n(sm64ex-based repos only)"),
"DYNOS": ConfigSetting("DYNOS", "check", tooltip = "Add additional options menu with Dynamic Option System by PeachyPeach\n(sm64ex-based repos only)\n(cannot be disabled in render96ex)"),
"CHEATER": ConfigSetting("CHEATER", "check", tooltip = "Add additional cheat options with CHEATER by s4ys\n(sm64ex-based repos only)\n(cannot be disabled in render96ex)"),
"HIGHFPS": ConfigSetting("HIGHFPS", "check", tooltip="enable HIGHFPS (render96ex) or use repository provided 60fps patch"),
"NODRAWINGDISTANCE": ConfigSetting("NODRAWINGDISTANCE", "check", tooltip = "Don't hide faraway objects\n(sm64ex-based repos only)\n(render96ex includes in game slider control)"),
"EXTERNAL_DATA": ConfigSetting("EXTERNAL_DATA", "check", tooltip = "Allow add-on texture and soundpacks\n(sm64ex-based repos only)\n(cannot be disabled in render96ex)"),
"BETTERCAMERA": ConfigSetting("BETTERCAMERA", "check", tooltip = "Adds Camera Settings to options menu\n(sm64ex-based repos only)\n(cannot be disabled in render96ex)"),
"TEXTURE_FIX": ConfigSetting("TEXTURE_FIX", "check", tooltip = "Fix minor details like smoke texture\n(sm64ex-based repos only)"),
"DISCORDRPC": ConfigSetting("DISCORDRPC", "check", tooltip = "Enable Discord Rich Presence\n(64-bit sm64ex-based repos only)"),
"TEXTSAVES": ConfigSetting("TEXTSAVES", "check", tooltip = "Save player data as a text file instead of binary rom format\n(sm64ex-based repos only)\n(cannot be disabled in render96ex)"),
"DEBUG": ConfigSetting("DEBUG", "check", tooltip = "Advanced Build Option"),
"TARGET_WEB": ConfigSetting("TARGET_WEB", "check", tooltip = "Build Web Version with emsdk"),
"TARGET_RPI": ConfigSetting("TARGET_RPI", "check", tooltip = "Build Raspberry Pi version on Rapsberry Pi"),
"DISCORD_SDK": ConfigSetting("DISCORD_SDK (Co-op)", "check", tooltip = "Enable Discord Integration\n(sm64ex-coop only)"),
"IMMEDIATELOAD": ConfigSetting("IMMEDIATELOAD (Co-op)", "check", tooltip = "Advanced Build Option\n(sm64ex-coop only)"),
"GIT": ConfigSetting("UserDef Git", "line", tooltip = "GIT and BRANCH are ignored if PRESET is known"),
"BRANCH": ConfigSetting("UserDef Branch", "line", tooltip = "GIT and BRANCH are ignored if PRESET is known"),
"FLAGS": ConfigSetting("UserDef Flags", "line", tooltip = "Additional flags to pass to make. Advanced usage only.\nMust be surrounded by quotation marks, for example:\n\"CUSTOMFLAG=1 ANOTHERFLAG=WHATEVER\""),
"DOS_GL": ConfigSetting("DOS_GL: ", "dropdown", tooltip = "Supports dmesa (glide) or osmesa", dropdownOptions = ["dmesa","osmesa"]),
"ENABLE_OPENGL_LEGACY": ConfigSetting("ENABLE_OPENGL_LEGACY (DOS)", "check", tooltip = "see dos github"),
"TOUCH_CONTROLS": ConfigSetting("TOUCH_CONTROLS (Android)", "check", tooltip = "Enable touschsceen Overlay\n(Android only)"),
"ARMONLY": ConfigSetting("Target ARM Only (Android)", "check", tooltip = "Prevent x86 builds for apk\n(Android only)"),
"LEGACY_RES": ConfigSetting("LEGACY_RES (render96ex)", "check", tooltip = "Advanced Build Option\n(render96ex only)"),
"CONFIG": ConfigSetting("Prompt to configure before each build", "check", tooltip = "CONFIG"),
"BuildMusic": ConfigSetting("Play background music during build", "check", tooltip = "BuildMusic"),
"AutoUpdate": ConfigSetting("Update smlinux before each build", "check", tooltip = "AutoUpdate"),
}
# Change this variable to adjust the layout of the options.
itemsPerColumn = 11
# This loops through the dictionary and adds all the settings to the menu.
# This may be out of order depending on your version of Python 3.
for i in self.configDict:
actualSettingsLayout.addWidget(self.configDict[i], list(self.configDict.keys()).index(i) % itemsPerColumn, floor(list(self.configDict.keys()).index(i) / itemsPerColumn))
if not list(self.configDict.keys()).index(i) % itemsPerColumn:
actualSettingsLayout.setColumnStretch(floor(list(self.configDict.keys()).index(i) / itemsPerColumn), 1)
# Read in the config file
configFile = open(sys.argv[1], "r")
configFileLines = configFile.readlines()
for i in configFileLines:
if i[0] == '#':
continue
self.configDict[i.split("=")[0]].setSetting(i.split("=")[1].strip("\n"))
configFile.close()
# Now we add the cancel and save buttons
saveAndCancelContainer = QWidget()
saveAndCancelLayout = QHBoxLayout()
saveAndCancelContainer.setLayout(saveAndCancelLayout)
#cancelButton = QPushButton()
#cancelButton.setText("Exit Config Editor without Saving Changes")
#cancelButton.clicked.connect(self.close)
#saveAndCancelLayout.addWidget(cancelButton)
saveButton = QPushButton()
saveButton.setText("Okay")
saveButton.clicked.connect(self.saveAndExit)
saveAndCancelLayout.addWidget(saveButton)
mainLayout.addWidget(saveAndCancelContainer)
def saveAndExit(self):
configFile = open(sys.argv[1], "r")
configFileStrOld = configFile.read()
configFile.close()
configFileStrNew = ""
for i in configFileStrOld.splitlines():
if i[0] == '#':
configFileStrNew += i + '\n'
continue
configFileStrNew += i.split("=")[0] + '='
configFileStrNew += self.configDict[i.split("=")[0]].getSetting() + '\n'
configFile = open(sys.argv[1], "w")
configFile.write(configFileStrNew)
configFile.close()
self.close()
# This is the entrypoint for the program
def main():
# Before we do anything, let's check to see if the config file exists
if not os.path.exists(sys.argv[1]):
print("ERROR: configuration \'" + sys.argv[1] + "\' does not exist.")
sys.exit(1)
# We declare the app
configApp = QApplication(sys.argv)
# We declare the window
configWindow = ConfigWindow()
# We show the window
configWindow.show()
# We trigger the event loop for the app inside of "sys.exit()" to prevents leaks
sys.exit(configApp.exec_())
if __name__ == '__main__':
main()
| 2.953125 | 3 |
garagepi/framework/usecase/__init__.py | constructorfleet/GaragePi-Assistant | 0 | 12789628 | """Use case frameworks."""
from garagepi.framework.usecase.UseCase import UseCase
from garagepi.framework.usecase.CommandUseCase import CommandUseCase | 0.78125 | 1 |
Utilities/Dox/PythonScripts/ICRParserTest.py | rjwinchester/VistA | 72 | 12789629 | # ---------------------------------------------------------------------------
# Copyright 2018 The Open Source Electronic Health Record Alliance
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------
import argparse
import filecmp
import json
import logging
import os
import sys
import unittest
import ICRParser
from ArgParserHelper import createArgParser
from LogManager import initLogging, logger
EXPECTED_PDFS = {
"807": ["ICR-4887.pdf"],
"General_Medical_Record___Vitals": ["ICR-3996.pdf"],
"IFCAP": ["ICR-214.pdf"],
"Imaging": ["ICR-4528.pdf"],
"Kernel": ["ICR-977.pdf", 'ICR-3103.pdf', "ICR-10156.pdf"],
"Lab_Service": ["ICR-91.pdf", "ICR-6104.pdf", "ICR-6280.pdf"],
"MailMan": ["ICR-1151.pdf"],
"Mental_Health": ["ICR-1068.pdf"],
"Order_Entry_Results_Reporting": ["ICR-639.pdf"],
"Outpatient_Pharmacy": ["ICR-1973.pdf"],
"Record_Tracking": ["ICR-85.pdf"],
"Registration": ["ICR-28.pdf", "ICR-4849.pdf", "ICR-10035.pdf"],
"Surgery": ["ICR-16.pdf", "ICR-6730.pdf"],
"Text_Integration_Utility": ["ICR-3248.pdf", "ICR-5499.pdf"],
"VA_Certified_Components___DSSI": ["ICR-5317.pdf"],
"VA_FileMan": ["ICR-10034.pdf", "ICR-10155.pdf"]}
class testICRParser(unittest.TestCase):
def test_01_json(self):
generated_output_dir = os.path.join(args.testOutDir, "JSON")
generated_icr_json = os.path.join(generated_output_dir, "ICRTest.JSON")
ICRParser.generate_json(TEST_ICR_FILE, generated_icr_json)
# Check that expected JSON was generated
expectedJsonFile = open(TEST_ICR_JSON)
generatedJsonFile = open(generated_icr_json)
expectedJson = json.load(expectedJsonFile)
generatedJson = json.load(generatedJsonFile)
# We need to compare the json contents. The order the file is written
# changes from Python 2 to Python 3
self.assertEqual(expectedJson, generatedJson)
# Make sure ONLY JSON file was generated
generated_files = os.listdir(generated_output_dir)
self.assertEqual(len(generated_files), 1)
# TODO: Remove generated_output_dir?
def test_02_html(self):
generated_output_dir = os.path.join(args.testOutDir, "HTML")
generated_icr_output_dir = os.path.join(generated_output_dir, "ICR")
generated_icr_json = os.path.join(generated_output_dir, "ICRTest.JSON")
ICRParser.generate_html(TEST_ICR_FILE, generated_icr_json, args.MRepositDir,
args.patchRepositDir, generated_icr_output_dir)
# Check that expected HTML files were generated
generated_files = sorted(os.listdir(generated_icr_output_dir))
self.assertEqual(TEST_ICR_FILES, generated_files)
# And that they contain the expected content
for f in TEST_ICR_FILES:
expected_file = os.path.join(TEST_ICR_DIR, f)
generated_file = os.path.join(generated_icr_output_dir, f)
if not filecmp.cmp(expected_file, generated_file):
self.fail("%s is not the same as %s" % (expected_file, generated_file))
# TODO: Remove generated_output_dir?
def test_03_pdf(self):
generated_output_dir = os.path.join(args.testOutDir, "PDF")
generated_pdf_output_dir = os.path.join(generated_output_dir, "dox", "PDF")
generated_icr_json = os.path.join(generated_output_dir, "ICRTest.JSON")
ICRParser.generate_pdf(TEST_ICR_FILE, generated_icr_json, args.MRepositDir,
args.patchRepositDir, generated_pdf_output_dir)
# Check that expected subdirectories were generated
expected_dirs = sorted(EXPECTED_PDFS.keys())
generated_dirs = sorted(os.listdir(generated_pdf_output_dir))
self.assertEqual(expected_dirs, generated_dirs)
# Check that expected PDFS were generated
# Note: Content is NOT checked
for pdf_dir in expected_dirs:
expected_files = sorted(EXPECTED_PDFS[pdf_dir])
generated_files = sorted(os.listdir(os.path.join(generated_pdf_output_dir, pdf_dir)))
self.assertEqual(expected_files, generated_files)
# TODO: Remove generated_output_dir?
def test_04_all(self):
generated_output_dir = os.path.join(args.testOutDir, "ALL")
generated_icr_output_dir = os.path.join(generated_output_dir, "ICR")
generated_icr_json = os.path.join(generated_output_dir, "ICRTest.JSON")
generated_pdf_output_dir = os.path.join(generated_output_dir, "dox", "PDF")
ICRParser.generate_all(TEST_ICR_FILE, generated_icr_json, args.MRepositDir,
args.patchRepositDir, generated_icr_output_dir,
generated_pdf_output_dir)
# Check that expected HTML files were generated
generated_html_files = sorted(os.listdir(generated_icr_output_dir))
self.assertEqual(TEST_ICR_FILES, generated_html_files)
# And that they contain the expected content
for f in TEST_ICR_FILES:
expected_file = os.path.join(TEST_ICR_DIR, f)
generated_file = os.path.join(generated_icr_output_dir, f)
if not filecmp.cmp(expected_file, generated_file):
self.fail("%s is not the same as %s" % (expected_file, generated_file))
# Check that expected subdirectories were generated
expected_pdf_dirs = sorted(EXPECTED_PDFS.keys())
generated_pdf_dirs = sorted(os.listdir(generated_pdf_output_dir))
self.assertEqual(expected_pdf_dirs, generated_pdf_dirs)
# Check that expected PDFS were generated
# Note: Content is NOT checked
for pdf_dir in expected_pdf_dirs:
expected_pdf_files = sorted(EXPECTED_PDFS[pdf_dir])
generated_pdf_files = sorted(os.listdir(os.path.join(generated_pdf_output_dir, pdf_dir)))
self.assertEqual(expected_pdf_files, generated_pdf_files)
# TODO: Remove generated_output_dir?
def test_05_local(self):
pass
if __name__ == '__main__':
init_parser = createArgParser()
parser = argparse.ArgumentParser(description='VistA ICR Parser',
parents=[init_parser])
parser.add_argument('testOutDir', help='Test files will be created here')
args = parser.parse_args()
# Require that output directory is empty
if os.path.exists(args.testOutDir) and os.path.isdir(args.testOutDir):
if os.listdir(args.testOutDir):
sys.exit("Test output directory must be empty")
scripts_dir = os.path.join(args.patchRepositDir, "Utilities", "Dox", "PythonScripts")
TEST_ICR_FILE = os.path.join(scripts_dir, "ICRTest.txt")
TEST_ICR_JSON = os.path.join(scripts_dir, "ICRTest.JSON")
TEST_ICR_DIR = os.path.join(scripts_dir, "ICR_TEST", "ICR")
TEST_ICR_FILES = sorted(os.listdir(TEST_ICR_DIR))
initLogging(args.testOutDir, "TESTICRParser.log", level=logging.ERROR)
suite = unittest.TestLoader().loadTestsFromTestCase(testICRParser)
unittest.TextTestRunner(verbosity=2).run(suite)
| 1.429688 | 1 |
web_shortcuts/bs4.pyw | aburakayaz/automating-the-boring-stuff | 1 | 12789630 | import webbrowser
webbrowser.open('https://www.crummy.com/software/BeautifulSoup/bs4/doc/')
| 2.03125 | 2 |
wrappers/python/virgil_crypto_lib/foundation/key_material_rng.py | odidev/virgil-crypto-c | 26 | 12789631 | # Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <<EMAIL>>
from ctypes import *
from ._c_bridge import VscfKeyMaterialRng
from virgil_crypto_lib.common._c_bridge import Buffer
from ._c_bridge import VscfStatus
from virgil_crypto_lib.common._c_bridge import Data
from .random import Random
class KeyMaterialRng(Random):
"""Random number generator that generate deterministic sequence based
on a given seed.
This RNG can be used to transform key material rial to the private key."""
# Minimum length in bytes for the key material.
KEY_MATERIAL_LEN_MIN = 32
# Maximum length in bytes for the key material.
KEY_MATERIAL_LEN_MAX = 512
def __init__(self):
"""Create underlying C context."""
self._lib_vscf_key_material_rng = VscfKeyMaterialRng()
self._c_impl = None
self._ctx = None
self.ctx = self._lib_vscf_key_material_rng.vscf_key_material_rng_new()
def __delete__(self, instance):
"""Destroy underlying C context."""
self._lib_vscf_key_material_rng.vscf_key_material_rng_delete(self.ctx)
def random(self, data_len):
"""Generate random bytes.
All RNG implementations must be thread-safe."""
data = Buffer(data_len)
status = self._lib_vscf_key_material_rng.vscf_key_material_rng_random(self.ctx, data_len, data.c_buffer)
VscfStatus.handle_status(status)
return data.get_bytes()
def reseed(self):
"""Retrieve new seed data from the entropy sources."""
status = self._lib_vscf_key_material_rng.vscf_key_material_rng_reseed(self.ctx)
VscfStatus.handle_status(status)
def reset_key_material(self, key_material):
"""Set a new key material."""
d_key_material = Data(key_material)
self._lib_vscf_key_material_rng.vscf_key_material_rng_reset_key_material(self.ctx, d_key_material.data)
@classmethod
def take_c_ctx(cls, c_ctx):
inst = cls.__new__(cls)
inst._lib_vscf_key_material_rng = VscfKeyMaterialRng()
inst.ctx = c_ctx
return inst
@classmethod
def use_c_ctx(cls, c_ctx):
inst = cls.__new__(cls)
inst._lib_vscf_key_material_rng = VscfKeyMaterialRng()
inst.ctx = inst._lib_vscf_key_material_rng.vscf_key_material_rng_shallow_copy(c_ctx)
return inst
@property
def c_impl(self):
return self._c_impl
@property
def ctx(self):
return self._ctx
@ctx.setter
def ctx(self, value):
self._ctx = self._lib_vscf_key_material_rng.vscf_key_material_rng_shallow_copy(value)
self._c_impl = self._lib_vscf_key_material_rng.vscf_key_material_rng_impl(self.ctx)
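# --- Hypothetical usage sketch (key material bytes are placeholders; requires the
# native virgil crypto library to be installed) ---
if __name__ == '__main__':
    rng = KeyMaterialRng()
    rng.reset_key_material(b'\x01' * KeyMaterialRng.KEY_MATERIAL_LEN_MIN)
    print(rng.random(16))  # deterministic bytes derived from the key material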
| 1.445313 | 1 |
usage_test_unit.py | budhiraja/Test-Unit | 0 | 12789632 | from test_unit_module import test_unit
class Usage(test_unit):
def __init__(self):
pass
def test_case_0(self):
"""
Example of a passing test case.
"""
actual = 8 # 3+5
expected = 8
self.assert_op(actual,expected,0) # Actual result, Expected result, test case id or identifier(not optional), HINT (Optional)
def test_case_1(self):
"""
Example of a failing test case without a hint.
"""
actual = 9 # 3+5 !=9
expected = 8
self.assert_op(actual,expected,1)
def test_case_2(self):
"""
Example of a failing test case with a hint
"""
actual = 9 # 3+5 !=9
expected = 8
self.assert_op(actual,expected, 2,"Addition is not done right.")
# NOTE : There is a third parameter. It is an optional parameter and will be used a hint ONLY IF the test case FAILS.
def main():
suite = Usage()
suite.run_tests()
if __name__ == "__main__":
main()
| 3.640625 | 4 |
cron_rdahmm_unavco.py | GeoGateway/PythonRDAHMM | 0 | 12789633 | #!/usr/local/bin/python
#==========================================================================
# Ingest, and execute rdahmm evaluation for unavco datasets
# Set up a cron job to run nightly
#
# usage: cron_rdahmm_unavco.py
#
#===========================================================================
import os, subprocess, sys
from threading import Thread
from properties import properties
unavco_cmd = properties('script_path') + "/unavco_ingest_single.py"
eval_cmd = properties('script_path') + "/rdahmm_eval_single.py"
xml_cmd = properties('script_path') + "/create_summary_xmls.py"
json_cmd = properties('script_path') + "/create_summary_jsons.py"
class ThreadJob(Thread):
def __init__(self, dataset):
Thread.__init__(self)
self.source = dataset
self.dataset = "UNAVCO_" + dataset.upper()
def run(self):
# ingest a given dataset: pbo | nucleus
print "+++Starting process UNAVCO ", self.source, " ..."
cmd = unavco_cmd
p = subprocess.Popen([cmd, self.source], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr  # communicate() already captured the error text
print "+++Finished process UNAVCO ", self.source
# run rdahmm evaluation on the corresponding dataset
print "+++Starting process ", self.dataset, " ..."
cmd = eval_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.dataset], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr  # communicate() already captured the error text
print "+++Finished process ", self.dataset
# create summary xml on the corresponding dataset
print "+++creating summary xml for ", self.dataset, " ..."
cmd = xml_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.dataset], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr  # communicate() already captured the error text
print "+++Finished creating summary xml for ", self.dataset
# create summary json on the corresponding dataset
print "+++creating summary json for ", self.dataset, " ..."
cmd = json_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.dataset], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr  # communicate() already captured the error text
print "+++Finished creating summary json for ", self.dataset
for dataset in ['pbo', 'nucleus']:
t = ThreadJob(dataset)
t.start()
| 2.296875 | 2 |
src/cycle.py | haslersn/crow-cycle | 0 | 12789634 | #!/usr/bin/env python3
import argparse
import random
import csv
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('names',
type=argparse.FileType('r'),
help='Path to a file containing names for creating a circle of "murderer-murdered" pairs.'
'Only one name per line is allowed. Names may contain spaces.')
args = parser.parse_args()
def read_names_from_file(file):
"""
Reads the provided file line by line to provide a list representation of the contained names.
:param file: A text file containing one name per line
:return: A list of the names contained in the provided text file
"""
names_list = []
for line in file:
names_list.append(str(line).strip())
return names_list
def create_cycle(p_names):
"""
Creates a random mapping from murderers to people to be murdered.
:param p_names: A list of names from which to create pairs of murderers and people to be murdered
:return: A mapping from murderers to people to be murdered
"""
pairs = {}
names = p_names.copy() # Produce no side-effects by copying list
random.shuffle(names)
for i in range(-1, len(names) - 1):
murderer = names[i]
murdered = names[i + 1]
pairs[murderer] = murdered
return pairs
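# Illustrative example (hypothetical names; the actual pairing depends on the shuffle):
#   create_cycle(["Ann", "Bob", "Cem"]) might return
#   {"Cem": "Ann", "Ann": "Bob", "Bob": "Cem"}, i.e. a single closed cycle in which
#   every name appears exactly once as murderer and exactly once as target.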
def print_pairs(pairs):
for key in pairs.keys():
print("%s ---[kills]---> %s" % (key, pairs[key]))
def save_pairs(pairs):
with open('murder_pairs.csv', 'w') as f:
for key in pairs.keys():
f.write("%s,%s\n" % (key, pairs[key]))
def save_pairs_seperated(pairs):
"""
    Writes murder-target pairs into separate text files for easy copying.
:param pairs: A dictionary of murder-target pairs.
"""
with open('murderers.txt', 'w') as murderer_file:
with open('targets.txt', 'w') as targets_file:
for key in pairs.keys():
murderer_file.write("%s\n" % key)
targets_file.write("%s\n" % pairs[key])
names_list = read_names_from_file(args.names)
murder_pairs = create_cycle(names_list)
print_pairs(murder_pairs)
save_pairs(murder_pairs)
save_pairs_seperated(murder_pairs)
| 4.125 | 4 |
dynapython/client.py | Dynactionize/Dyna-Python | 0 | 12789635 | import grpc
import csv
#from tables import Table
from enum import IntEnum
from dynagatewaytypes import datatypes_pb2
from dynagatewaytypes import enums_pb2
from dynagatewaytypes import general_types_pb2
from dynagatewaytypes import authentication_pb2_grpc
from dynagatewaytypes import authentication_pb2
from dynagatewaytypes import action_pb2_grpc
from dynagatewaytypes import action_pb2
from dynagatewaytypes import topology_pb2_grpc
from dynagatewaytypes import topology_pb2
from dynagatewaytypes import label_pb2_grpc
from dynagatewaytypes import label_pb2
from dynagatewaytypes import instance_pb2_grpc
from dynagatewaytypes import instance_pb2
from dynagatewaytypes import query_pb2_grpc
from dynagatewaytypes import query_pb2
from dynagatewaytypes import networkquery_pb2_grpc
from dynagatewaytypes import networkquery_pb2
class Service(IntEnum):
ACTION_SERVICE = 0
TOPOLOGY_SERVICE = 1
LABEL_SERVICE = 2
INSTANCE_SERVICE = 3
QUERY_SERVICE = 4
NETWORK_QUERY_SERVICE = 5
class Client:
def __init__(self, host, port):
self._channel = grpc.insecure_channel('{0}:{1}'.format(host, port))
self._authservice = authentication_pb2_grpc.AuthenticateServiceStub(self._channel)
self._services = [None]*6
self._services[Service.ACTION_SERVICE] = action_pb2_grpc.ActionServiceStub(self._channel)
self._services[Service.TOPOLOGY_SERVICE] = topology_pb2_grpc.TopologyServiceStub(self._channel)
self._services[Service.LABEL_SERVICE] = label_pb2_grpc.LabelServiceStub(self._channel)
self._services[Service.INSTANCE_SERVICE] = instance_pb2_grpc.InstanceServiceStub(self._channel)
self._services[Service.QUERY_SERVICE] = query_pb2_grpc.QueryServiceStub(self._channel)
self._services[Service.NETWORK_QUERY_SERVICE] = networkquery_pb2_grpc.NetworkServiceStub(self._channel)
self._token = None
self._metadata = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self._channel.close()
def user_login(self, username, password):
loginReq = authentication_pb2.GetTokenReq(
user = authentication_pb2.UserAuth(
user_name = username,
password = password
)
)
try:
tokenResp = self._authservice.GetToken(loginReq)
except grpc.RpcError as err:
print(err)
return False
self._token = tokenResp.token
self._metadata = [('authorization', 'Bearer {0}'.format(self._token))]
return True
def service_login(self, client_id, secret):
loginReq = authentication_pb2.GetTokenReq(
service = authentication_pb2.ServiceAuth(
client_id = client_id,
secret = secret
)
)
try:
tokenResp = self._authservice.GetToken(loginReq)
except grpc.RpcError as err:
print(err)
return False
self._token = tokenResp.token
self._metadata = [('authorization', 'Bearer {0}'.format(self._token))]
return True
def bearer_login(self, bearer_token):
loginReq = authentication_pb2.GetTokenReq(
bearer = authentication_pb2.BearerToken(
token = bearer_token
)
)
try:
tokenResp = self._authservice.GetToken(loginReq)
except grpc.RpcError as err:
print(err)
return False
self._token = tokenResp.token
self._metadata = [('authorization', 'Bearer {0}'.format(self._token))]
return True
def call(self, service_func, arg):
return service_func(arg, metadata=self._metadata)
def service(self, service):
return self._services[service]
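# Minimal usage sketch (host, port and credentials are placeholders; the concrete RPC
# methods and request messages passed to `call` depend on the generated dynagatewaytypes stubs):
#
#   with Client('localhost', 50051) as client:
#       if client.user_login('some_user', 'some_password'):
#           actions = client.service(Service.ACTION_SERVICE)
#           # response = client.call(actions.<SomeRpc>, <matching request message>)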
| 2.234375 | 2 |
run_main.py | zhjpqq/scaledensenet | 5 | 12789636 | <filename>run_main.py
# -*- coding: utf-8 -*-
__author__ = 'ooo'
__date__ = '2018/12/15 12:17'
import os
import time
import random
import warnings
import argparse
import torch
import torch.backends.cudnn as cudnn
from config.configure import Config
from train_val_test import run_main_by_cfgs
# How To Use #########@########################
# cd to/your/project/dir
# nohup python run_main.py -name resnet -arch res50 -cfg cfgresxx -exp exp.resxx -gpu 1 3 1>resxx.out 2>&1 &
if __name__ == '__main__':
    # Declare the device configuration
seed = None and random.random()
if torch.cuda.is_available():
        # Must be set at the program entry point, otherwise the GPU_ID cannot be chosen freely
cudnn.benchmark = True
cudnn.deterministic = False
cudnn.enable = True
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
cudnn.deterministic = True
warnings.warn('\nYou have set a fixed random seed: %s, which may slow down training speed!\n' % seed)
    # Declare the parameter configuration
cfg = Config()
    # Read arguments from the command line
parser = argparse.ArgumentParser(description='Pytorch Cifar10 Training & Validation')
parser.add_argument('-name', '--arch_name', type=str, default='', help='model name')
parser.add_argument('-arch', '--arch_list', type=str, nargs='+', default='',
help='the keys list of <arch_kwargs> from exp_list')
parser.add_argument('-cfg', '--cfg_dict', type=str, default='', help='the key of <config> from cfg_params')
parser.add_argument('-exp', '--exp_version', type=str, default='', help='named as uxp.xxx, experiment version')
parser.add_argument('-gpu', '--gpu_ids', type=int, nargs='+', default=[], help='which gpus used to train')
args = parser.parse_args()
    # In PyCharm mode, set the arguments directly here; no command-line input is needed
    # In command-line mode, this block must stay commented out and the arguments passed on the command line
# args.arch_name = 'msnet'
# args.arch_list = ['ms9']
# args.cfg_dict = 'cfgms9'
# args.exp_version = 'exp.ms9'
# args.gpu_ids = [0]
# args.arch_name = 'srnet'
# args.arch_list = ['sr1']
# args.cfg_dict = 'cfgsr1'
# args.exp_version = 'exp.sr1'
# args.gpu_ids = [0, 1, 2, 3]
# print('\n=> Your Args is :', args, '\n')
# args.arch_name = 'scalenet'
# args.arch_list = ['ci7']
# args.cfg_dict = 'cfgci7'
# args.exp_version = 'exp.ci7'
# args.gpu_ids = [0, 1]
print('\n=> Your Args is :', args, '\n')
    # Read parameters from the configuration file <cfg_dict>
import cfg_params as training_cfg
cfg_dict = getattr(training_cfg, args.cfg_dict, None)
del training_cfg
assert isinstance(cfg_dict, dict)
cfg.dict_to_class(cfg_dict, exclude=())
    # Override the old parameters in <cfg_dict> with the command-line arguments
if args.arch_name:
cfg.arch_name = args.arch_name
if args.exp_version:
cfg.exp_version = args.exp_version
if args.gpu_ids:
cfg.gpu_ids = args.gpu_ids
    # Read the model parameters from the architecture file <arch_dict>
import arch_params as training_arch
arch_kwargs_list = [getattr(training_arch, arch, None) for arch in args.arch_list]
del training_arch
assert None not in arch_kwargs_list
    # Run the main training program
for i, arch_kwargs in enumerate(arch_kwargs_list):
cfg.arch_kwargs = arch_kwargs
cfg.check_configs()
run_main_by_cfgs(cfg)
| 2.34375 | 2 |
tests/test_auth.py | taylordeatri/phc-sdk-py | 1 | 12789637 | <reponame>taylordeatri/phc-sdk-py
from phc.easy.auth import Auth, _shared_auth
def test_basic_use():
auth = Auth(
{
"token": "<PASSWORD>",
"account": "my-account",
"project_id": "my-project-id",
}
)
assert auth.account == "my-account"
def test_updating_attributes():
auth = Auth(
{
"token": "my-token",
"account": "my-account",
"project_id": "my-project-id",
}
)
assert auth.customized({"account": "new-account"}).account == "new-account"
def test_updated_shared_auth():
# Capture existing shared values
shared = Auth.shared()
original_account = shared.account
original_token = shared.token
Auth.set({"account": "research"})
assert shared.account == "research"
assert shared.token == original_token
# Reset shared object
Auth.set({"account": original_account, "token": original_token})
def test_custom_auth():
shared = Auth.shared()
original_account = shared.account
original_token = shared.token
auth = Auth.custom({"account": "research"})
assert auth.account == "research"
assert auth.token == original_token
# Does not change shared auth object
assert shared.account == original_account
assert shared.token == original_token
def test_creating_auth_from_another_auth_object():
auth = Auth({"account": "demo"})
auth1 = Auth(auth)
assert auth1.account == "demo"
| 2.515625 | 3 |
Leetcode/Python Solutions/Arrays/MinimumDominoRotationsForEqualRow.py | Mostofa-Najmus-Sakib/Applied-Algorithm | 1 | 12789638 | """
LeetCode Problem: 1007. Minimum Domino Rotations For Equal Row
Link: https://leetcode.com/problems/minimum-domino-rotations-for-equal-row/
Language: Python
Written by: <NAME>
"""
"""
Inefficient - Simulation with all possible dominos
Time Complexity: O(n)
Space Complexity: O(n)
"""
class Solution(object):
def minDominoRotations(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
# Helper function that returns the minimumSwaps required for a target domino
def dominoMinimumSwaps(target, A, B):
minSwaps = 0
for i in range(len(A)):
if A[i] != target and B[i] != target:
return float('inf')
elif A[i] != target:
minSwaps += 1
return minSwaps
minSwaps = float('inf')
dominoChoice = set(A).union(set(B)) # All possible domino choices
# Simulate all possible domino swap combinations possible
for i in list(dominoChoice):
minSwaps = min(minSwaps, dominoMinimumSwaps(i, A, B)) # A specific domino can match the top row
minSwaps = min(minSwaps, dominoMinimumSwaps(i, B, A)) # A specific domino can match the bottom row
return minSwaps if minSwaps != float('inf') else -1
"""
Efficient - Simulation with selected dominos
Time Complexity: O(n)
Space Complexity: O(1)
"""
class Solution(object):
def minDominoRotations(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
# Helper function that returns the minimumSwaps required for a target domino
def dominoMinimumSwaps(target, A, B):
minSwaps = 0
for i in range(len(A)):
if A[i] != target and B[i] != target:
return float('inf')
elif A[i] != target:
minSwaps += 1
return minSwaps
minSwaps = float('inf')
# Simulate domino swap combinations
minSwaps = min(minSwaps, dominoMinimumSwaps(A[0], A, B))
minSwaps = min(minSwaps, dominoMinimumSwaps(B[0], A, B))
minSwaps = min(minSwaps, dominoMinimumSwaps(A[0], B, A))
minSwaps = min(minSwaps, dominoMinimumSwaps(B[0], B, A))
return minSwaps if minSwaps != float('inf') else -1 | 3.609375 | 4 |
BOJ/05000~05999/5800~5899/5893.py | shinkeonkim/today-ps | 2 | 12789639 | print(bin(int(input(),2)*17)[2:])
| 2.578125 | 3 |
Game Functions/gameVars.py | jordanandrigo/Andrigo_J_Python_Homework | 0 | 12789640 | from random import randint
choices=["rock", "paper", "scissors"]
player_lives = 5
computer_lives = 5
total_lives = 5
computer=choices[randint(0,2)]
player = False | 3.078125 | 3 |
tests/fixtures/submodels.py | pashashocky/xsdata | 0 | 12789641 | <reponame>pashashocky/xsdata
from dataclasses import dataclass
from tests.fixtures.models import ChoiceType
@dataclass
class ChoiceTypeChild(ChoiceType):
pass
| 1.539063 | 2 |
backend/webshop/app_settings/prod.py | mbranko/webshop | 0 | 12789642 | <gh_stars>0
from .base import *
from .utils import read_variable, get_variable
ALLOWED_HOSTS = ['localhost', 'badasswebshop.com', 'www.badasswebshop.com']
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': get_variable('POSTGRES_DBNAME', 'webshop'),
'USER': get_variable('POSTGRES_USER', 'webshop'),
'PASSWORD': get_variable('POSTGRES_PASSWORD', 'webshop'),
'HOST': get_variable('POSTGRES_HOST', 'webshopdb'),
'PORT': get_variable('POSTGRES_PORT', ''),
        'ATOMIC_REQUESTS': True,  # transaction scope = one HTTP request
}
}
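# Example environment overrides for the database block above (illustrative values only):
#   POSTGRES_DBNAME=webshop POSTGRES_USER=webshop POSTGRES_PASSWORD=... \
#   POSTGRES_HOST=webshopdb POSTGRES_PORT=5432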
# email app_settings
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SECRET_KEY = read_variable('/private/secrets', 'SECRET_KEY') or get_variable('SECRET_KEY', 'unknown')
EMAIL_HOST_PASSWORD = read_variable('/private/secrets', 'EMAIL_HOST_PASSWORD') or get_variable('EMAIL_HOST_PASSWORD', 'unknown')
API_THROTTLE_RATE = 10
| 1.710938 | 2 |
day2.py | luth31/aoc-2020 | 0 | 12789643 | import util
def first(input_path: str):
passwords = 0
with open(input_path) as file:
lines = file.read().splitlines()
for line in lines:
split = line.split()
(min_count_str, max_count_str) = split[0].split('-')
min_count = int(min_count_str)
max_count = int(max_count_str)
char = split[1][:-1]
password = split[2]
char_count = password.count(char)
if min_count <= char_count <= max_count:
passwords += 1
print(f"Valid passwords: {passwords}")
def second(input_path: str):
passwords = 0
with open(input_path) as file:
lines = file.read().splitlines()
for line in lines:
split = line.split()
(first_pos_str, second_pos_str) = split[0].split('-')
first_pos = int(first_pos_str)
second_pos = int(second_pos_str)
char = split[1][:-1]
password = split[2]
if password[first_pos-1] == char or password[second_pos-1] == char:
if password[first_pos-1] != password[second_pos-1]:
passwords += 1
print(f"Valid passwords: {passwords}")
if __name__ == '__main__':
path = util.get_input_path(__file__)
if util.file_exists(path):
first(path)
if util.file_exists(path):
second(path)
| 3.609375 | 4 |
9. Find the Runner-UpScore!/sol.py | Abhishek20182/HackerRank-Python-Practice | 2 | 12789644 | if __name__ == '__main__':
n = int(input())
arr = set(map(int, input().split()))
lst = list(arr)
lst.sort()
print(lst[-2]) | 3.28125 | 3 |
env_Cleaner.py | FavOla/SIAUROP | 0 | 12789645 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: fnels
"""
import numpy as np
import maze
import random
import cv2
class EnvCleaner(object):
def __init__(self, N_agent, map_size, seed):
self.map_size = map_size
self.seed = seed
self.occupancy = self.generate_maze(seed)
self.N_agent = N_agent
self.agt_pos_list = []
for i in range(self.N_agent):
self.agt_pos_list.append([1, 1])
def generate_maze(self, seed):
symbols = {
# default symbols
'start': 'S',
'end': 'X',
'wall_v': '|',
'wall_h': '-',
'wall_c': '+',
'head': '#',
'tail': 'o',
'empty': ' '
}
maze_obj = maze.Maze(int((self.map_size - 1) / 2), int((self.map_size - 1) / 2), seed, symbols, 1)
grid_map = maze_obj.to_np()
for i in range(self.map_size):
for j in range(self.map_size):
if grid_map[i][j] == 0:
grid_map[i][j] = 2
return grid_map
def step(self, action_list):
reward = 0
#print(self.occupancy)
for i in range(len(action_list)):
if action_list[i] == 0: # up
if self.occupancy[self.agt_pos_list[i][0] - 1][self.agt_pos_list[i][1]] != 1: # if can move
self.agt_pos_list[i][0] = self.agt_pos_list[i][0] - 1
else: #can't move
return -1
if action_list[i] == 2: # down
if self.occupancy[self.agt_pos_list[i][0] + 1][self.agt_pos_list[i][1]] != 1: # if can move
self.agt_pos_list[i][0] = self.agt_pos_list[i][0] + 1
else: #can't move
return -1
if action_list[i] == 3: # left
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] - 1] != 1: # if can move
self.agt_pos_list[i][1] = self.agt_pos_list[i][1] - 1
else: #can't move
return -1
if action_list[i] == 1: # right
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] + 1] != 1: # if can move
self.agt_pos_list[i][1] = self.agt_pos_list[i][1] + 1
else: #can't move
return -1
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1]] == 2: # if the spot is dirty
self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1]] = 0
reward = reward + 1
else:
return -.05 #spot not dirty
return reward
def can_move(self, action_list):
for i in range(len(action_list)):
if action_list[i] == 0: # up
if self.occupancy[self.agt_pos_list[i][0] - 1][self.agt_pos_list[i][1]] != 1: # if can move
return True
else:
return False
if action_list[i] == 2: # down
if self.occupancy[self.agt_pos_list[i][0] + 1][self.agt_pos_list[i][1]] != 1: # if can move
return True
else:
return False
if action_list[i] == 3: # left
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] - 1] != 1: # if can move
return True
else:
return False
if action_list[i] == 1: # right
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] + 1] != 1: # if can move
return True
else:
return False
def is_room_clean(self):
for row in self.occupancy: #go through all the tiles in room
for column in row:
if column == 2: #tile is dirty
return False
return True
def get_global_obs(self):
obs = np.zeros((self.map_size, self.map_size, 3))
for i in range(self.map_size):
for j in range(self.map_size):
if self.occupancy[i, j] == 0:
obs[i, j, 0] = 1.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 1.0
if self.occupancy[i, j] == 2:
obs[i, j, 0] = 0.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 0.0
for i in range(self.N_agent):
obs[self.agt_pos_list[i][0], self.agt_pos_list[i][1], 0] = 1.0
obs[self.agt_pos_list[i][0], self.agt_pos_list[i][1], 1] = 0.0
obs[self.agt_pos_list[i][0], self.agt_pos_list[i][1], 2] = 0.0
return obs
def reset(self):
self.occupancy = self.generate_maze(self.seed)
self.agt_pos_list = []
for i in range(self.N_agent):
self.agt_pos_list.append([1, 1])
return self.occupancy
def render(self):
obs = self.get_global_obs()
enlarge = 5
new_obs = np.ones((self.map_size*enlarge, self.map_size*enlarge, 3))
for i in range(self.map_size):
for j in range(self.map_size):
if obs[i][j][0] == 0.0 and obs[i][j][1] == 0.0 and obs[i][j][2] == 0.0:
cv2.rectangle(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, j * enlarge + enlarge), (0, 0, 0), -1)
if obs[i][j][0] == 1.0 and obs[i][j][1] == 0.0 and obs[i][j][2] == 0.0:
cv2.rectangle(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, j * enlarge + enlarge), (0, 0, 255), -1)
if obs[i][j][0] == 0.0 and obs[i][j][1] == 1.0 and obs[i][j][2] == 0.0:
cv2.rectangle(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, j * enlarge + enlarge), (0, 255, 0), -1)
cv2.imshow('image', new_obs)
cv2.waitKey(10)
def random_action_list(self):
action_list = []
for i in range(1000):
action_list.append(random.randint(0,3))
return action_list
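# Minimal usage sketch (assumes the local `maze` module and an OpenCV-capable display):
#
#   env = EnvCleaner(N_agent=2, map_size=13, seed=3)
#   for _ in range(50):
#       actions = [random.randint(0, 3) for _ in range(env.N_agent)]  # 0=up, 1=right, 2=down, 3=left
#       reward = env.step(actions)
#       env.render()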
| 2.75 | 3 |
game.py | subhramit/Double-DQN | 10 | 12789646 | from collections import deque
import random
import numpy as np
import gym
from gym.wrappers import AtariPreprocessing
class Game():
def __init__(self, game_name, start_noop=2, last_n_frames=4, frameskip=4, grayscale_obs=True, scale_obs=False):
self.start_noop = start_noop
self.last_n_frames = last_n_frames
self.frameskip = frameskip
self.buffer = deque([], self.last_n_frames)
self.env = gym.make(game_name)
# Hacks to make environment deterministic and compatible with Atari Preprocessing
self.env.unwrapped.frameskip = 1
if 'NoFrameskip' not in self.env.spec.id:
print('Environment is not Frameskip version.')
self.env.spec.id += '-NoFrameskip'
self.envWrapped = AtariPreprocessing(self.env, frame_skip=self.frameskip, grayscale_obs=grayscale_obs, scale_obs=scale_obs)
self.envWrapped.reset()
self.n_actions = self.env.action_space.n
init_screen = self.get_screen()
# Screen dimension is represented as (CHW) for PyTorch
self.scr_dims = tuple([self.last_n_frames] + list(init_screen.shape))
for _ in range(self.frameskip):
self.buffer.append(init_screen.copy())
#self.start_game()
def start_game(self):
self.buffer.clear()
# Random starting operations to simulate human conditions
noop_action = 0
# In breakout, nothing happens unless first 'Fired'.
if 'Breakout' in self.env.spec.id:
noop_action = 1
for _ in range(random.randint(1, self.start_noop)):
# 0 corresponds to No-Op action
# 1 corresponds to Fire
self.step(noop_action)
# Fill remaining buffer by most recent frame to send a valid input to model
if len(self.buffer) > 0:
last_screen = self.buffer[-1]
else:
last_screen = self.get_screen()
while len(self.buffer) < self.buffer.maxlen:
self.buffer.append(last_screen.copy())
def get_screen(self):
screen = self.envWrapped._get_obs()
return screen
def get_input(self):
# Each element in buffer is a tensor of 84x84 dimensions.
# This function returns tensor of 4x84x84 dimensions.
return np.stack(tuple(self.buffer), axis=0)
def get_n_actions(self):
# return number of actions
return self.n_actions
def reset_env(self):
# reset the gym environment
self.env.reset()
self.start_game()
def get_screen_dims(self):
# return the screen dimensions
return self.scr_dims
def step(self, action):
screen, reward, done, _ = self.envWrapped.step(action)
# # DEBUG
# import matplotlib.pyplot as plt
# plt.imshow(screen)
# plt.plot()
# plt.savefig('tmp_img.png')
# print(action, '\t', reward)
# input()
# # DEBUG
# ALE takes care of the max pooling of the last 2 frames
# Refer: "https://danieltakeshi.github.io/2016/11/25/
# frame-skipping-and-preprocessing-for-deep-q-networks-on-atari-2600-games/"
self.buffer.append(screen)
# reward is clipped between -1 and 1
reward = np.clip(reward, -1.0, 1.0)
return reward, done
"""
Actions in OpenAI Gym ALE
-------------------------
ACTION_MEANING = {
0: "NOOP",
1: "FIRE",
2: "UP",
3: "RIGHT",
4: "LEFT",
5: "DOWN",
6: "UPRIGHT",
7: "UPLEFT",
8: "DOWNRIGHT",
9: "DOWNLEFT",
10: "UPFIRE",
11: "RIGHTFIRE",
12: "LEFTFIRE",
13: "DOWNFIRE",
14: "UPRIGHTFIRE",
15: "UPLEFTFIRE",
16: "DOWNRIGHTFIRE",
17: "DOWNLEFTFIRE",
}
"""
| 2.71875 | 3 |
checkov/arm/registry.py | pmalkki/checkov | 4,013 | 12789647 | <reponame>pmalkki/checkov
from checkov.arm.base_registry import Registry
arm_resource_registry = Registry()
arm_parameter_registry = Registry()
| 1.179688 | 1 |
src/tests/docx.py | glibin/simple-report | 0 | 12789648 | <filename>src/tests/docx.py<gh_stars>0
#!coding:utf-8
import unittest
import os
from simple_report.report import DocumentReport
from simple_report.xlsx.spreadsheet_ml import (SectionException,
SectionNotFoundException)
from simple_report.docx.drawing import DocxImage
LOREM_IPSUM = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit, '
'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. '
'Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris '
'nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in '
'reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla '
'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa '
'qui officia deserunt mollit anim id est laborum.'
)
class TestLinuxDOCX(unittest.TestCase):
"""
"""
def setUp(self):
self.src_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'test_data',
'linux',
'docx'
)
self.dst_dir = self.src_dir
self.test_files = dict([(path, os.path.join(self.src_dir, path))
for path in os.listdir(self.src_dir)
if path.startswith('test')])
def test_simple_docx(self):
"""
"""
template_name = 'test-sluzh.docx'
path = self.test_files[template_name]
doc = DocumentReport(path)
res_file_name = 'res-' + template_name
dst = os.path.join(self.dst_dir, res_file_name)
doc.build(dst, {'Employee_name': u'<NAME>.',
'region_name': u'Казань'})
self.assertEqual(os.path.exists(dst), True)
def test_spreadsheet_docx(self):
"""
        Text inside a table
"""
template_name = 'test_spreadsheet.docx'
path = self.test_files[template_name]
doc = DocumentReport(path)
res_file_name = 'res-' + template_name
dst = os.path.join(self.dst_dir, res_file_name)
tag1 = doc.get_all_parameters().next()
self.assertEqual(tag1, '#sometext#')
doc.build(dst, {'sometext': u'Некий текст'})
self.assertEqual(os.path.exists(dst), True)
def test_picture_docx(self):
"""
        Text inside a rectangle
"""
template_name = 'test_rect.docx'
path = self.test_files[template_name]
doc = DocumentReport(path)
res_file_name = 'res-' + template_name
dst = os.path.join(self.dst_dir, res_file_name)
tags = []
for tag in doc.get_all_parameters():
tags.append(tag)
self.assertFalse(tags[0] != '#brandgroupname#'
and tags[0] != '#category#')
self.assertFalse(tags[1] != '#brandgroupname#'
and tags[1] != '#category#')
doc.build(dst, {'brandgroupname': u'Брэнд', 'category': u'Категория'})
self.assertEqual(os.path.exists(dst), True)
def test_picture_shape(self):
template_name = 'test_pict_shape_2.docx'
path = self.test_files[template_name]
res_file_name = 'res-pict_shape.docx'
dst = os.path.join(self.dst_dir, res_file_name)
report = DocumentReport(path)
params = {}
params['fname'] = '1'
params['sname'] = '2'
params['pname'] = '3'
params['issued_by'] = '4'
params['date_of_birth'] = '5'
params['date_start_day'] = '6'
params['date_start_month'] = '7'
params['date_start_year'] = '8'
params['date_start'] = '9'
params['date_end_day'] = '10'
params['date_end_month'] = '11'
params['date_end_year'] = '12'
params['date_end'] = '13'
params['region_number'] = '14'
params['date_start_plus'] = '15'
params['date_start_plus_day'] = '16'
params['date_start_plus_month'] = '17'
params['date_start_plus'] = '18'
params['date_start_plus_year'] = '19'
params['habaddr'] = '20'
params['regaddr1'] = '21'
params['regaddr2'] = '22'
params['regaddr3'] = '23'
params['inspect1'] = '24'
params['inspect2'] = '25'
params['is_AI'] = u"AI"
params['is_AII'] = u"AII"
params['is_AIII'] = u"AIII"
params['is_AIV'] = u"AIV"
params['is_B'] = u"B"
params['is_C'] = u"C"
params['is_D'] = u"D"
params['is_E'] = u"E"
params['is_F'] = u"F"
params['#komment#'] = 'd'
report.build(dst, params)
self.assertEqual(os.path.exists(dst), True)
def test_tables_flush(self):
template_name = 'test_table.docx'
path = self.test_files[template_name]
res_file_name = 'res-table_flush.docx'
dst = os.path.join(self.dst_dir, res_file_name)
report = DocumentReport(path)
# report.set_docx_table_sections()
s1 = report.get_section('section1')
s2 = report.get_section('section2')
s2.flush({'test': u'Lorem ipsum'})
s1.flush({
'test_table_row1col1': u'Hello',
'test_table_row1col2': u'simple_report',
'test_table_row1col3': u'user',
'test_table_row1col4': LOREM_IPSUM,
})
params = {}
report.build(dst, params)
def test_table_section_double(self):
template_name = 'test_table_double_section.docx'
path = self.test_files[template_name]
report = DocumentReport(path)
try:
report.get_section('section1')
except SectionException:
pass
else:
raise Exception('Docx tables sections doubling test failed')
def test_divisible_keys(self):
template_name = 'test_divisibles.docx'
path = self.test_files[template_name]
report = DocumentReport(path)
res_file_name = 'res-divisibles.docx'
dst = os.path.join(self.dst_dir, res_file_name)
params = {
"tasks": "",
"kind_tostring": u"документарная и выездная",
"normative_list": "",
"finish_date": "13.12.2012",
"expert_list": "",
"docs": "",
"num": "1",
"purpose": "",
"address": u"420101, Респ Татарстан (Татарстан), г Казань, ул Карбышева, д. 37, кв. 44",
"events": "",
"subject3": "x",
"articles": "",
"inspectors_list": "",
"supervisionobj_name": u"Малыши и малышки",
"oyear": 2013,
"type_tostring": u"внеплановая",
"start_date": "14.02.2013",
"subject1": "x",
"subject2": "x",
"oday": 21,
"subject4": "x",
"subject5": "x",
"subject6": "x",
"subject7": "x",
"authority_parent": "",
"omonth": 3
}
report.build(dst, params)
def test_flush_order(self):
template_name = 'test_flush_order.docx'
path = self.test_files[template_name]
report = DocumentReport(path)
res_file_name = 'res-flush_order.docx'
dst = os.path.join(self.dst_dir, res_file_name)
params = {
"example": "output_one",
"example_two": "ouput_two",
"example_two_three": "output_two_three",
"exampl": "no_output"
}
report.build(dst, params)
def test_tabs(self):
template_name = 'test_tabs.docx'
path = self.test_files[template_name]
report = DocumentReport(path)
res_file_name = 'res-tabs.docx'
dst = os.path.join(self.dst_dir, res_file_name)
params = {
"tfoms_to": "TFOMS",
"tfoms_to_address": "TFOMS_ADDRESS",
"tfoms_to_director_fio": "TFOMS_TO_DIR_FIO"
}
report.build(dst, params)
def test_insert_picture(self):
template_name = 'test_insert_image.docx'
path = self.test_files[template_name]
report = DocumentReport(path)
res_file_name = 'res-insert-image.docx'
dst = os.path.join(self.dst_dir, res_file_name)
params = {
"test": u"Тестовый комментарий",
"image": DocxImage(
self.test_files['test_insert_image.jpg'],
3,
2
),
"tfoms_to_director_fio": "TFOMS_TO_DIR_FIO"
}
report.build(dst, params)
def test_table_insert_picture(self):
template_name = 'test_table.docx'
path = self.test_files[template_name]
res_file_name = 'res-table-image.docx'
dst = os.path.join(self.dst_dir, res_file_name)
report = DocumentReport(path)
# report.set_docx_table_sections()
s1 = report.get_section('section1')
s2 = report.get_section('section2')
s2.flush(
{
'test': DocxImage(
self.test_files['test_insert_image.jpg'], 3, 2
)
}
)
s1.flush({
'test_table_row1col1': u'Hello',
'test_table_row1col2': u'simple_report',
'test_table_row1col3': DocxImage(
self.test_files['test_table_image.gif'], 3.5, 2.5
),
'test_table_row1col4': LOREM_IPSUM,
})
params = {}
report.build(dst, params)
if __name__ == '__main__':
unittest.main()
| 2.4375 | 2 |
ImageProcessing/midterm5.py | annisanurdiana/python_programming | 0 | 12789649 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 29 15:50:11 2021
@author: <NAME>
"""
import cv2 as cv
img = cv.imread("Lena.bmp", 0)
img2 = cv.imread("Baboon.png", 0)
cv.imshow("FirstOriginal.Img",img)
cv.imshow("SecondOriginal.Img",img2)
pic = img[:200,:200]
pic2 = img2[img2.shape[0]-200:img2.shape[0],img2.shape[1]-200:img2.shape[1]]
cv.imshow("FirstImage",pic)
cv.imshow("SecondImage",pic2)
for i in range(200):
for j in range(200):
if int(pic2[i,j]) - int(pic[i,j]) < -1:
pic2[i,j] = int(pic2[i,j]) - int(pic[i,j])
else:
pic2[i,j] = 0
cv.imshow("Result", pic2)
cv.waitKey(0)
cv.destroyAllWindows() | 2.875 | 3 |
sparse2spatial/RFRbuild.py | tsherwen/sparse2spatial | 1 | 12789650 | """
Build ensemble models from ensemble of RandomForestRegressor models
"""
import sys
import numpy as np
import pandas as pd
import xarray as xr
import datetime as datetime
import sklearn as sk
from sklearn.ensemble import RandomForestRegressor
import glob
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
# s2s imports
import sparse2spatial.utils as utils
import sparse2spatial.RFRanalysis as RFRanalysis
from sparse2spatial.RFRanalysis import get_core_stats_on_current_models
def build_or_get_models(df=None, testset='Test set (strat. 20%)',
save_model_to_disk=False, read_model_from_disk=True,
target='Iodide', model_names=None,
delete_existing_model_files=False, rm_outliers=True,
model_sub_dir='/TEMP_MODELS/', random_state=42,
rm_LOD_filled_data=False, model_feature_dict=None,
debug=False):
"""
Build (or read from disc) various models (diff. features) to test comparisons
Parameters
-------
df (pd.DataFrame): DataFrame of target and features values for point locations
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
save_model_to_disk (bool): Save the models to disc as pickled binaries?
read_model_from_disk (bool): read the models from disc if they are already built?
target (str): Name of the target variable (e.g. iodide)
model_names (list): List of model names to build/read
random_state (int), the seed used by the random number generator
delete_existing_model_files (bool): delete the existing model binaries in folder?
rm_outliers (bool): remove the outliers from the observational dataset
rm_LOD_filled_data (bool): remove the limit of detection (LOD) filled values?
rm_Skagerrak_data (bool): Remove specific data
    (the above argument is an iodide-specific option - remove this)
model_feature_dict (dict): dictionary of features used in each model
model_sub_dir (str): the sub directory in which the models are to be saved/read
debug (bool): run and debug function/output
Returns
-------
(dict)
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
import gc
# - Get processed data
if isinstance(df, type(None)):
        print('A DataFrame of target and feature values must be provided!')
sys.exit()
# - Get local variables
# Location to save models
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/{}/'.format(data_root, target, model_sub_dir)
if debug:
print('Using models from {}'.format(folder))
# Get details on model setups to use
if isinstance(model_feature_dict, type(None)):
print('Dictionary of model names and features must be provided!')
sys.exit()
if isinstance(model_names, type(None)):
model_names = list(sorted(model_feature_dict.keys()))
# Set a hyperparameter settings
hyperparam_dict = utils.get_hyperparameter_dict()
# Setup dictionaries to save model detail into
N_features_used = {}
features_used_dict = {}
oob_scores = {}
models_dict = {}
# Loop model input variable options and build models
if not read_model_from_disk:
for n_model_name, model_name in enumerate(model_names):
print(n_model_name, model_name)
# Get testing features and hyperparameters to build model
features_used = model_feature_dict[model_name]
n_estimators = hyperparam_dict['n_estimators']
oob_score = hyperparam_dict['oob_score']
# Select and split variables in the training and test dataset
train_set_tr = df.loc[df[testset] != True, features_used]
train_set_tr_labels = df.loc[df[testset] != True, target]
# Build model (Setup and fit)
model = RandomForestRegressor(random_state=random_state,
n_estimators=n_estimators,
oob_score=oob_score,
criterion='mse')
# Provide the model with the features (features_used) and
# The labels ( target, train_set_tr_labels)
model.fit(train_set_tr, train_set_tr_labels)
# Save model in temporary folder?
if save_model_to_disk:
# Check if there are any existing files...
pkls_in_dir = glob.glob(folder+'*.pkl')
Npkls = len(pkls_in_dir)
if delete_existing_model_files and (n_model_name == 0):
import os
[os.remove(i) for i in pkls_in_dir]
print('WARNING: deleted existing ({}) pkls'.format(Npkls))
elif(not delete_existing_model_files) and (n_model_name == 0):
assert Npkls == 0, 'WARNING: model files exist!'
else:
pass
# Save models as pickles
model_savename = "my_model_{:0>4}.pkl".format(n_model_name)
try:
joblib.dump(model, folder+model_savename)
except FileNotFoundError:
prt_str = "WARNING: Failed to save file - @ '{}' with name '{}'"
                    print(prt_str.format(folder, model_savename))
utils.check_or_mk_directory_struture()
# Also keep models online in dictionary
models_dict[model_name] = model
# force local tidy of garbage
gc.collect()
# Loop model and predict for all values
# If time to make models too great, then read-in here and 'rm' from above
for n_model_name, model_name in enumerate(model_names):
# Get testing features and hyperparameters to build model
features_used = model_feature_dict[model_name]
print(n_model_name, model_name, features_used)
# Read models from disk?
if (not save_model_to_disk) and (read_model_from_disk):
model_savename = "my_model_{:0>4}.pkl".format(n_model_name)
model = joblib.load(folder+model_savename)
models_dict[model_name] = model
else:
model = models_dict[model_name]
# Predict target for all observation locations
df[model_name] = model.predict(df[features_used].values)
# Save number of features used too
N_features_used[model_name] = len(features_used)
features_used_dict[model_name] = '+'.join(features_used)
try:
oob_scores[model_name] = model.oob_score_
except:
oob_scores[model_name] = np.NaN
models_dict[model_name] = model
# Return models and predictions in a dictionary structure
RFR_dict = {}
RFR_dict['models_dict'] = models_dict
RFR_dict['model_names'] = model_names
RFR_dict['df'] = df
RFR_dict['features_used_dict'] = features_used_dict
RFR_dict['N_features_used'] = N_features_used
RFR_dict['oob_scores'] = oob_scores
return RFR_dict
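# Minimal calling sketch (the model name and feature columns below are assumptions;
# `df` must already hold the target, the feature columns and the test-set flag column):
#
#   model_features = {'RFR(TEMP+SAL)': ['WOA_TEMP', 'WOA_Salinity']}
#   RFR_dict = build_or_get_models(df=df, target='Iodide',
#                                  model_feature_dict=model_features,
#                                  read_model_from_disk=False,
#                                  save_model_to_disk=False)
#   models_dict = RFR_dict['models_dict']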
def get_features_used_by_model(models_list=None, RFR_dict=None):
"""
Get the (set of) features used by a list of models
Parameters
-------
RFR_dict (dict): dictionary of core variables and data
models_list (list): list of model names to get features for
Returns
-------
(list)
"""
# Get dictionary of shared data if not provided
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# Get models to use (assume top models, if not provided)
if isinstance(models_list, type(None)):
models_list = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])
# Now plot up in input variables
features_used_dict = RFR_dict['features_used_dict']
vars2use = []
for model_name in models_list:
vars2use += [features_used_dict[model_name].split('+')]
# Remove double ups
vars2use = [j for i in vars2use for j in i]
return list(set(vars2use))
def get_top_models(n=10, stats=None, RFR_dict=None, vars2exclude=None,
exclude_ensemble=True, verbose=True):
"""
retrieve the names of the top models (default=top 10)
Parameters
-------
n (int), the number of top ranked models to return
vars2exclude (list): list of variables to exclude (e.g. DEPTH)
RFR_dict (dict): dictionary of core variables and data
exclude_ensemble (bool): exclude the ensemble prediction from the list
verbose (bool): print out verbose output?
Returns
-------
(list)
"""
# Get stats on models in RFR_dict
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
if isinstance(stats, type(None)):
stats = get_core_stats_on_current_models(
RFR_dict=RFR_dict, verbose=False)
# Don't count the Ensemble in the top ranking models
if exclude_ensemble:
var_ = 'RFR(Ensemble)'
try:
stats = stats.T[[i for i in stats.T.columns if var_ not in i]].T
if verbose:
print('removed {} from list'.format(var_))
except:
if verbose:
print('failed to remove {} from list'.format(var_))
# Return the top model's names
params2inc = stats.T.columns
# Exclude any variables in provided list
if not isinstance(vars2exclude, type(None)):
for var_ in vars2exclude:
params2inc = [i for i in params2inc if var_ not in i]
# Return the updated dataframe's index (model names that are top models)
return list(stats.T[params2inc].T.head(n).index)
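# Example (as used elsewhere in this module): keep the 10 best-ranked models while
# excluding any model that uses the 'DOC' or 'Prod' feature variables:
#   top_models = get_top_models(n=10, RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])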
def Hyperparameter_Tune4choosen_models(RFR_dict=None, target='Iodide', cv=7,
testset='Test set (strat. 20%)'):
"""
    Driver to tune multiple RFR models
Parameters
-------
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
cv (int), number of folds of cross-validation to use
target (str): Name of the target variable (e.g. iodide)
RFR_dict (dict): dictionary of models, data and shared variables
Returns
-------
(None)
"""
from sklearn.externals import joblib
# Get the data for the models
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# Set models to optimise
models2compare = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])
# Get variables needed from core dictionary
features_used_dict = RFR_dict['features_used_dict']
models_dict = RFR_dict['models_dict']
# Set folder to use for optimised models
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/OPTIMISED_MODELS/'.format(data_root, target)
# Loop and save optimised model
# NOTE: this could be speed up by using more cores
for model_name in models2compare:
print('Optimising model: {}'.format(model_name))
# Get model
model = models_dict[model_name]
# get testing features
features_used = features_used_dict[model_name].split('+')
# Tune parameters
BE = Hyperparameter_Tune_model(model=model, use_choosen_model=False,
save_best_estimator=True, model_name=model_name,
RFR_dict=RFR_dict,
features_used=features_used, cv=cv)
# - Test the tuned models against the test set
test_the_tuned_models = False
if test_the_tuned_models:
# Get the core data
df = RFR_dict['df']
# Get the data
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# Test the improvements in the optimised models?
for model_name in models2compare:
# - Get existing model
model = models_dict[model_name]
# Get testing features
features_used = features_used_dict[model_name].split('+')
# - Get the data
# ( Make sure to remove the target )
# train_features = df[features_used].loc[ train_set.index ]
# train_labels = df[[target]].loc[ train_set.index ]
test_features = df[features_used].loc[test_set.index]
test_labels = df[[target]].loc[test_set.index]
# - test the existing model
print(' ---------------- '*3)
print(' ---------------- {}: '.format(model_name))
print(' - Base values: ')
quick_model_evaluation(model, test_features, test_labels)
# - Get optimised model
try:
model_savename = "my_model_{}.pkl".format(model_name)
OPmodel = joblib.load(folder + model_savename)
#
print(' - Optimised values: ')
quick_model_evaluation(OPmodel, test_features, test_labels)
except:
pass
# - Test the tuned models against the training set
# Get the core data
df = RFR_dict['df']
# get the data
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# Test the improvements in the optimised models?
for model_name in models2compare:
# - Get existing model
model = models_dict[model_name]
# get testing features
features_used = features_used_dict[model_name].split('+')
# - Get the data
# ( Making sure to remove the target!!! )
train_features = df[features_used].loc[train_set.index]
train_labels = df[[target]].loc[train_set.index]
# test_features = df[features_used].loc[ test_set.index ]
# test_labels = df[[target]].loc[ test_set.index ]
# - test the existing model
print(' ---------------- '*3)
print(' ---------------- {}: '.format(model_name))
print(' - Base values: ')
quick_model_evaluation(model, train_features, train_labels)
# - Get optimised model
try:
model_savename = "my_model_{}.pkl".format(model_name)
OPmodel = joblib.load(folder + model_savename)
#
print(' - Optimised values: ')
quick_model_evaluation(OPmodel, train_features, train_labels)
except:
pass
def Hyperparameter_Tune_model(use_choosen_model=True, model=None,
RFR_dict=None, df=None, cv=3,
testset='Test set (strat. 20%)', target='Iodide',
features_used=None, model_name=None,
save_best_estimator=True):
"""
Driver to tune hyperparmeters of model
Parameters
-------
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
target (str): Name of the target variable (e.g. iodide)
RFR_dict (dict): dictionary of core variables and data
model_name (str): name of model to tune performance of
features_used (list): list of the features within the model_name model
save_best_estimator (bool): save the best performing model offline
model (RandomForestRegressor), Random Forest Regressor model to tune
cv (int), number of folds of cross-validation to use
Returns
-------
(RandomForestRegressor)
"""
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestRegressor
# Get data to test
if isinstance(df, type(None)):
# df = get_dataset_processed4ML()
df = RFR_dict['df']
# Use the model selected from the feature testing
if use_choosen_model:
assert_str = "model name not needed as use_choosen_model selected!"
assert isinstance(model, type(None)), assert_str
# select a single chosen model
mdict = get_choosen_model_from_features_selection()
features_used = mdict['features_used']
model = mdict['model']
model_name = mdict['name']
# - extract training dataset
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# also sub select all vectors for input data
# ( Making sure to remove the target!!! )
train_features = df[features_used].loc[train_set.index]
train_labels = df[[target]].loc[train_set.index]
test_features = df[features_used].loc[test_set.index]
test_labels = df[[target]].loc[test_set.index]
# - Make the base model for comparisons
base_model = RandomForestRegressor(n_estimators=10, random_state=42,
criterion='mse')
base_model.fit(train_features, train_labels)
quick_model_evaluation(base_model, test_features, test_labels)
# - First make an intial explore of the parameter space
rf_random = Use_RS_CV_to_explore_hyperparams(cv=cv,
train_features=train_features,
train_labels=train_labels,
features_used=features_used
)
# Check the performance by Random searching (RandomizedSearchCV)
best_random = rf_random.best_estimator_
best_params_ = rf_random.best_params_
print(rf_random.best_params_)
quick_model_evaluation(best_random, test_features, test_labels)
# - Now do a more focused optimisation
# get the parameters based on the RandomizedSearchCV output
param_grid = define_hyperparameter_options2test(
features_used=features_used, best_params_=best_params_,
param_grid_RandomizedSearchCV=True)
# Use GridSearchCV
grid_search = use_GS_CV_to_tune_Hyperparams(cv=cv,
train_features=train_features,
param_grid=param_grid,
train_labels=train_labels,
features_used=features_used,
)
print(grid_search.best_params_)
    # Check the performance of grid searching
BEST_ESTIMATOR = grid_search.best_estimator_
quick_model_evaluation(BEST_ESTIMATOR, test_features, test_labels)
# Save the best estimator now for future use
if save_best_estimator:
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/OPTIMISED_MODELS/'.format(data_root, target)
model_savename = "my_model_{}.pkl".format(model_name)
joblib.dump(BEST_ESTIMATOR, folder + model_savename)
else:
return BEST_ESTIMATOR
def Use_RS_CV_to_explore_hyperparams(train_features=None,
train_labels=None,
features_used=None,
test_features=None,
test_labels=None,
scoring='neg_mean_squared_error',
cv=3):
"""
    Initial test of parameter space using RandomizedSearchCV
Parameters
-------
features_used (list): list of the features used by the model
train_features (list): list of the training features
train_labels (list): list of the training labels
test_features (list): list of the testing features
test_labels (list): list of the testing labels
cv (int), number of folds of cross-validation to use
scoring (str): scoring method to use
"""
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=10, stop=1000, num=10)]
# Number of features to consider at every split
# max_features = ['auto', 'sqrt']
max_features = range(1, 30)
if not isinstance(features_used, type(None)):
max_features = [i for i in max_features if
i <= len(features_used)]
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
# bootstrap = [True, False]
bootstrap = [True] # Force use of bootstrapping
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf = RandomForestRegressor(random_state=42, criterion='mse')
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf_random = RandomizedSearchCV(estimator=rf,
param_distributions=random_grid, n_iter=100, cv=cv,
verbose=2,
random_state=42, n_jobs=-1, scoring=scoring)
# Fit the random search model
rf_random.fit(train_features, train_labels)
return rf_random
def use_GS_CV_to_tune_Hyperparams(param_grid=None,
train_features=None, train_labels=None,
features_used=None, \
scoring='neg_mean_squared_error', cv=3,
):
"""
Refine hyperparameters using (GridSearchCV)
Parameters
-------
features_used (list): list of the features used by the model
train_features (list): list of the training features
train_labels (list): list of the training labels
cv (int), number of folds of cross-validation to use
scoring (str): scoring method to use
"""
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# Create a based model
rf = RandomForestRegressor(random_state=42, criterion='mse')
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid,
cv=cv, n_jobs=-1, verbose=2, scoring=scoring)
# Fit the grid search to the data
grid_search.fit(train_features, train_labels)
return grid_search
def quick_model_evaluation(model, test_features, test_labels):
"""
Perform a quick model evaluation
"""
from sklearn.metrics import mean_squared_error
predictions = model.predict(test_features)
MSE = mean_squared_error(test_labels, predictions)
RMSE = np.sqrt(MSE)
ME = np.mean(abs(predictions - test_labels.values))
print('Model Performance')
    print('Mean squared error (MSE): {:0.4f} nM'.format(MSE))
print('Mean absolute error (MAE): {:0.4f} nM'.format(ME))
print('RMSE = {:0.2f}'.format(RMSE))
return RMSE
def define_hyperparameter_options2test(features_used=None,
param_grid_RandomizedSearchCV=True,
best_params_=None,
param_grid_intial_guess=True,
):
"""
    Define a selection of test groups
Parameters
-------
param_grid_intial_guess (bool): use the parameter grid of guesses
param_grid_RandomizedSearchCV (bool): use the parameter grid obtained
by randomly searching
best_params_ (param_grid), parameter grid of best parameters to use
features_used (list): list of the features used by the model
"""
# - Shared variables in grid
vals2test = {
'n_estimators': [10, 50, 75, 100, 125, 200, 300, 500],
'max_features': [1, 2, 3, 4, 5],
'max_depth': [3, 10, None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 3, 10],
'oob_score': [True],
'bootstrap': [True],
}
if param_grid_RandomizedSearchCV:
if not isinstance(best_params_, type(None)):
vals2test_ASSUMED = vals2test.copy()
vals2test = {}
for key in best_params_:
value = best_params_[key]
# 'n_estimators' / trees
if (key == 'n_estimators'):
values = [value+(i*10) for i in range(0, 4)]
values += [value+(i*10) for i in range(-4, 0)]
# only allow values greater than zero
values = [i for i in values if i > 0]
# add sorted values
vals2test[key] = sorted(values)
# max depth
elif (key == 'max_depth'):
# value is either a number of "None".
if utils.is_number(value):
values = [value+(i*5) for i in range(0, 2)]
values += [value+(i*5) for i in range(-2, 0)]
# only allow values greater than zero
values = [i for i in values if i > 0]
# add sorted values
vals2test[key] = sorted(values)
else: # If None, just use None.
vals2test[key] = [value]
# 'min_samples_split'
elif (key == 'min_samples_leaf'):
if value == 1:
values = range(value, value+3)
else:
values = [value, value+1, value+2]
# add sorted values
vals2test[key] = list(sorted(values))
# 'min_samples_split'
elif (key == 'min_samples_split'):
values = [value, value+1, value+2]
# add sorted values
vals2test[key] = list(sorted(values))
# Add bootstrap and 'max_features' as recived
elif (key == 'bootstrap') or (key == 'max_features'):
vals2test[key] = [value]
# Check the key has settings intialised for
else:
print('No settings setup for {}'.format(key))
sys.exit()
# check all the values in best_params_ are in dict
new_keys = best_params_.keys()
old_keys = vals2test_ASSUMED.keys()
extra_keys = [i for i in old_keys if i not in new_keys]
print('WARNING: adding standard keys for: ', extra_keys)
for key in extra_keys:
vals2test[key] = vals2test_ASSUMED[key]
# check all values in
all_in_dict = any([i not in vals2test.keys() for i in new_keys])
assert (not all_in_dict), 'Missing keys from provided best_params_'
else:
vals2test = {
'n_estimators': [80+(i*10) for i in range(8)],
'max_features': [1, 2, 3, 4, 5],
'max_depth': [90+(i*5) for i in range(5)],
'min_samples_split': [4, 5, 6],
'min_samples_leaf': [1, 2, 3],
'oob_score': [True],
'bootstrap': [True],
}
# Check the number of variations being tested
    def prod(iterable):
        import operator
        from functools import reduce
        return reduce(operator.mul, iterable, 1)
len_of_values = [len(vals2test[i]) for i in vals2test.keys()]
print('WARNING: # of variations undertest = {}'.format(prod(len_of_values)))
    # Make sure max_features isn't set to more features than are actually available
if not isinstance(features_used, type(None)):
max_features = vals2test['max_features']
max_features = [i for i in max_features if
i <= len(features_used)]
vals2test['max_features'] = max_features
# --- Setup a parameter grid for testings
param_grid = [
# - # of trees (“n_estimators”, test=10, 25, 50, 100, 250, 500)
# {
# 'bootstrap': [True],
# 'n_estimators': vals2test['n_estimators'],
# 'oob_score': [True],
# },
# # - # of features/”variables” (“max_features”, test= 2,3,4, None)
# {
# 'bootstrap': [True],
# 'max_features': vals2test['max_features2test'],
# 'oob_score': [True],
# },
# # - both of the above
# {
# 'bootstrap': [True],
# 'n_estimators': vals2test['n_estimators'],
# 'max_features': vals2test['max_features'],
# 'oob_score': [True],
# },
# # - Minimum samples per leaf
# {
# 'bootstrap': [True],
# "min_samples_leaf": vals2test['min_samples_leaf'],
# 'oob_score': [True],
# },
# # - Depth
# {
# 'bootstrap': [True],
# "max_depth": max_depth2test,
# 'oob_score': [True],
# },
# # - Split?
# {
# 'bootstrap': [True],
# "min_samples_split": vals2test['min_samples_split'],
# 'oob_score': [True],
# },
# - all of the above
{
'bootstrap': vals2test['bootstrap'],
'n_estimators': vals2test['n_estimators'],
'max_features': vals2test['max_features'],
"min_samples_split": vals2test['min_samples_split'],
"min_samples_leaf": vals2test['min_samples_leaf'],
"max_depth": vals2test['max_depth'],
'oob_score': vals2test['oob_score'],
},
]
if param_grid_intial_guess:
return param_grid
elif return_random_informed_grid:
return param_grid_RandomizedSearchCV
def mk_predictions_NetCDF_4_many_builds(model2use, res='4x5',
models_dict=None, features_used_dict=None,
RFR_dict=None, target='Iodide',
stats=None, plot2check=False,
rm_Skagerrak_data=False,
debug=False):
"""
Make a NetCDF file of predicted variables for a given resolution
Parameters
-------
model2use (str): name of the model to use
target (str): Name of the target variable (e.g. iodide)
RFR_dict (dict): dictionary of core variables and data
res (str): horizontal resolution of dataset (e.g. 4x5)
features_used_dict (dict): dictionary of feature variables in models
plot2check (bool): make a quick plot to check the prediction
models_dict (dict): dictionary of RFR models and there names
stats (pd.DataFrame): dataframe of statistics on models in models_dict
rm_Skagerrak_data (bool): Remove specific data
    (the above argument is an iodide-specific option - remove this)
debug (bool): print out debugging output?
Returns
-------
(None)
"""
from sklearn.externals import joblib
import gc
import glob
# - local variables
# extract the models...
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models(
rm_Skagerrak_data=rm_Skagerrak_data
)
# Get the variables required here
if isinstance(features_used_dict, type(None)):
features_used_dict = RFR_dict['features_used_dict']
# Set the extr_str if rm_Skagerrak_data set to True
if rm_Skagerrak_data:
extr_str = '_No_Skagerrak'
else:
extr_str = ''
# Get location to save file and set filename
folder = utils.get_file_locations('data_root') + '/data/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# Get location to save ensemble builds of models
folder_str = '{}/{}/models/LIVE/ENSEMBLE_REPEAT_BUILD{}/'
folder = folder_str.format(folder, target, extr_str)
# - Make a dataset for each model
ds_l = []
# Get list of twenty models built
models_str = folder + '*{}*.pkl'.format(model2use)
builds4model = glob.glob(models_str)
print(builds4model, models_str)
# Print a string to debug the output
db_str = "Found {} saved models for '{} - glob str:{}'"
print(db_str.format(len(builds4model), model2use, models_str))
# Get the numbers for the models in directory
b_modelnames = [i.split('my_model_')[-1][:-3] for i in builds4model]
# Check the number of models selected
ast_str = "There aren't models for {} in {}"
assert len(b_modelnames) > 1, ast_str.format(model2use, folder)
# Now loop by model built for ensemble member and predict values
for n_modelname, b_modelname in enumerate(b_modelnames):
# Load the model
model = joblib.load(builds4model[n_modelname])
        # Get testing features
features_used = features_used_dict[model2use].split('+')
# Make a DataSet of predicted values
ds_l += [mk_da_of_predicted_values(model=model, res=res, dsA=dsA,
modelname=b_modelname,
features_used=features_used)]
# Force local tidy of garbage
gc.collect()
# Combine datasets
ds = xr.merge(ds_l)
# - Also get values for existing parameterisations
if target == 'Iodide':
# Chance et al (2013)
param = u'Chance2014_STTxx2_I'
arr = utils.calc_I_Chance2014_STTxx2_I(dsA['WOA_TEMP'].values)
ds[param] = ds[b_modelname] # use existing array as dummy to fill
ds[param].values = arr
        # MacDonald et al (2014)
param = 'MacDonald2014_iodide'
arr = utils.calc_I_MacDonald2014(dsA['WOA_TEMP'].values)
ds[param] = ds[b_modelname] # use existing array as dummy to fill
ds[param].values = arr
# Do a test diagnostic plot?
if plot2check:
for var_ in ds.data_vars:
# Do a quick plot to check
arr = ds[var_].mean(dim='time')
AC.map_plot(arr, res=res)
plt.title(var_)
plt.show()
# Save to NetCDF
save_name = 'Oi_prj_predicted_{}_{}_ENSEMBLE_BUILDS_{}_{}.nc'
ds.to_netcdf(save_name.format(target, res, model2use, extr_str))
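# NOTE: a hypothetical usage sketch for mk_predictions_NetCDF_4_many_builds.
# The model name is an illustrative assumption and presumes that the ensemble
# builds for that model have already been saved to the ensemble build folder.
def _example_mk_ensemble_predictions(model2use='RFR(TEMP+DEPTH+SAL)'):
    """Sketch: write ensemble-member predictions for one model to NetCDF"""
    # Build (or retrieve) the core dictionary of models and data
    RFR_dict = build_or_get_models()
    # Predict fields for every saved build of the chosen model
    mk_predictions_NetCDF_4_many_builds(model2use, res='4x5',
                                        RFR_dict=RFR_dict, target='Iodide')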
def get_model_predictions4obs_point(df=None, model_name='TEMP+DEPTH+SAL',
model=None, features_used=None):
"""
Get model predictions for all observed points
Parameters
-------
    df (pd.DataFrame): dataframe containing the target and feature variables
    features_used (list): list of feature variables used by the model
    model (RandomForestRegressor): Random Forest Regressor model to use
model_name (str): name of the model to use
Returns
-------
(np.array)
"""
    # A model must be provided
    if isinstance(model, type(None)):
        print('A model must be provided to get_model_predictions4obs_point')
        sys.exit()
    # The testing features to use must also be provided
    if isinstance(features_used, type(None)):
        func_name = 'get_model_predictions4obs_point'
        print("The model's features must be provided to {}".format(func_name))
        sys.exit()
# Now predict for the given testing features
target_predictions = model.predict(df[features_used])
return target_predictions
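# NOTE: a hypothetical usage sketch for get_model_predictions4obs_point; the
# DataFrame, fitted model, and feature names are assumptions for illustration.
def _example_predict_at_obs_points(df, model):
    """Sketch: predict the target at the observed locations with a fitted model"""
    # Features assumed to match those the model was trained with
    features_used = ['WOA_TEMP', 'WOA_Salinity', 'Depth_GEBCO']
    return get_model_predictions4obs_point(df=df, model=model,
                                           features_used=features_used)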
def mk_test_train_sets(df=None, target='Iodide',
rand_strat=True, features_used=None,
random_state=42, rand_20_80=False,
nsplits=4, verbose=True, debug=False):
"""
Make a test and training dataset for ML algorithms
Parameters
-------
    target (str): Name of the target variable (e.g. iodide)
    random_state (int): seed value to use as random seed for reproducible analysis
    nsplits (int): number of quantile bins to use for stratified sampling
    rand_strat (bool): split the data randomly using stratified sampling
    rand_20_80 (bool): split the data randomly into an 80:20 train:test split
    features_used (list): list of feature variables to retain in the returned sets
    df (pd.DataFrame): dataframe containing the target and feature variables
debug (bool): print out debugging output?
verbose (bool): print out verbose output?
Returns
-------
    (tuple): train_set, test_set and test_set_targets
"""
# - make Test and training set
# to make this approach's output identical at every run
np.random.seed(42)
# - Standard random selection:
if rand_20_80:
from sklearn.model_selection import train_test_split
# Use a standard 20% test set.
train_set, test_set = train_test_split(df, test_size=0.2,
random_state=random_state)
        # Also sub-select just the feature columns for the input data
        # (making sure the target is not included)
train_set = df[features_used].loc[train_set.index]
test_set = df[features_used].loc[test_set.index]
test_set_targets = df[[target]].loc[test_set.index]
# - Use a random split
if rand_strat:
from sklearn.model_selection import StratifiedShuffleSplit
# Add in "SPLIT_GROUP" metric
SPLITvar = 'SPLIT_GROUP'
        use_ceil_of_log = False  # This approach was only used for early (AGU) work
if use_ceil_of_log:
# Original approach taken for AGU work etc
ceil_ln_limited = np.ceil(np.log(df[target]))
# push bottom end values into lower bin
ceil_ln_limited[ceil_ln_limited <= 2] = 2
# push top end values in higher bin
ceil_ln_limited[ceil_ln_limited >= 5] = 5
df[SPLITvar] = ceil_ln_limited
else:
            # Use quantile bins and put the sparsely populated high values together
# NOTE: use quartile cut (pd.qcut, not pd.cut)
# df[SPLITvar] = pd.cut(df[target].values,10).codes.astype(int)
# Combine the lesser populated higher 5 bins into the 5th bin
# df.loc[ df[SPLITvar] >= 4, SPLITvar ] = 4
# qcut will split the data into N ("nsplits") bins (e.g. quintiles)
# pd.qcut(df[target].values,5).value_counts()
df[SPLITvar] = pd.qcut(df[target].values, nsplits).codes
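            # NOTE: with e.g. nsplits=4, pd.qcut assigns each observation a
            # quartile code (0-3), so each stratum holds ~25% of the data;
            # the StratifiedShuffleSplit below then preserves these
            # proportions in both the training and test sets.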
if verbose:
print(df[SPLITvar].value_counts())
# setup the split
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2,
random_state=random_state)
# Now split
for train_index, test_index in split.split(df, df[SPLITvar]):
train_set = df.loc[train_index]
test_set = df.loc[test_index]
test_set_targets = df[[target]].loc[test_index]
# Gotcha for changes in array index
Na = df[~df.index.isin(train_index.tolist() + test_index.tolist())]
            if (Na.shape[0] > 0):
print('WARNING'*20)
print(Na)
# Print out the split of the bins...
if verbose:
dfs = {
'ALL data': df, 'test data': test_set, 'train data': train_set
}
for key_ in dfs.keys():
            print('Data split for: {}'.format(key_))
print(dfs[key_][SPLITvar].value_counts() / dfs[key_].shape[0])
# Now remove the SPLIT group
for set_ in train_set, test_set:
set_.drop(SPLITvar, axis=1, inplace=True)
return train_set, test_set, test_set_targets
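# NOTE: a hypothetical usage sketch for mk_test_train_sets; the DataFrame and
# feature names are assumptions for illustration.
def _example_split_obs_data(df):
    """Sketch: make a stratified 80:20 train/test split of the observations"""
    features_used = ['WOA_TEMP', 'WOA_Salinity', 'Depth_GEBCO']
    train_set, test_set, test_set_targets = mk_test_train_sets(
        df=df, target='Iodide', rand_strat=True,
        features_used=features_used, random_state=42)
    return train_set, test_set, test_set_targets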
def mk_predictions_for_3D_features(dsA=None, RFR_dict=None, res='4x5',
models_dict=None, features_used_dict=None,
stats=None, folder=None, target='Iodide',
use_updated_predictor_NetCDF=False,
save2NetCDF=False, plot2check=False,
models2compare=[], topmodels=None,
xsave_str='',
add_ensemble2ds=False,
verbose=True, debug=False):
"""
Make a NetCDF file of predicted target from feature variables for a given resolution
Parameters
----------
dsA (xr.Dataset): dataset object with variables to interpolate
RFR_dict (dict): dictionary of core variables and data
res (str): horizontal resolution (e.g. 4x5) of Dataset
    save2NetCDF (bool): save the interpolated Dataset as a NetCDF file?
features_used_dict (dict): dictionary of feature variables in models
    models_dict (dict): dictionary of RFR models and their names
stats (pd.DataFrame): dataframe of statistics on models in models_dict
folder (str): location of NetCDF file of feature variables
target (str): name of the species being predicted
    models2compare (list): list of models to make spatial predictions for (duplicates are removed)
topmodels (list): list of models to make spatial predictions for
xsave_str (str): string to include as suffix in filename used for saved NetCDF
add_ensemble2ds (bool): calculate std. dev. and mean for list of topmodels
verbose (bool): print out verbose output?
debug (bool): print out debugging output?
Returns
-------
(xr.Dataset)
"""
# Make sure the core dictionary is provided
    assert isinstance(RFR_dict, dict), \
        'Core variables must be provided as a dict (RFR_dict)'
    # Make sure a full list of models was provided
    assert (len(models2compare) > 0), 'A list of models to compare must be provided!'
    # Include all the topmodels in the list of models to compare, if provided
    if isinstance(topmodels, list):
models2compare += topmodels
    # Remove any duplicates in the list of models to predict
models2compare = list(set(models2compare))
# Get the variables required here
if isinstance(models_dict, type(None)):
models_dict = RFR_dict['models_dict']
if isinstance(features_used_dict, type(None)):
features_used_dict = RFR_dict['features_used_dict']
# Get location to save file and set filename
if isinstance(folder, type(None)):
folder = utils.get_file_locations('data_root') + '/data/'
if isinstance(dsA, type(None)):
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# - Make a dataset of predictions for each model
ds_l = []
for modelname in models2compare:
        # Get the model
        model = models_dict[modelname]
        # Get the testing features used by this model
features_used = utils.get_model_features_used_dict(modelname)
# Make a DataSet of predicted values
ds_tmp = utils.mk_da_of_predicted_values(dsA=dsA, model=model, res=res,
modelname=modelname,
features_used=features_used)
# Add attributes to the prediction
ds_tmp = utils.add_attrs2target_ds(ds_tmp, add_global_attrs=False,
varname=modelname)
# Save to list
ds_l += [ds_tmp]
# Combine datasets
ds = xr.merge(ds_l)
# - Also get values for parameterisations
# if target == 'Iodide':
    # # Chance et al (2014)
# param = u'Chance2014_STTxx2_I'
# arr = utils.calc_I_Chance2014_STTxx2_I(dsA['WOA_TEMP'].values)
# ds[param] = ds[modelname] # use existing array as dummy to fill
# ds[param].values = arr
    # # MacDonald et al (2014)
# param = 'MacDonald2014_iodide'
# arr = utils.calc_I_MacDonald2014(dsA['WOA_TEMP'].values)
# ds[param] = ds[modelname] # use existing array as dummy to fill
# ds[param].values = arr
# Add ensemble to ds too
if add_ensemble2ds:
print('WARNING: Using topmodels for ensemble as calculated here')
var2template = list(ds.data_vars)[0]
ds = RFRanalysis.add_ensemble_avg_std_to_dataset(ds=ds, res=res,
target=target,
RFR_dict=RFR_dict,
topmodels=topmodels,
var2template=var2template,
save2NetCDF=False)
# Add global attributes
ds = utils.add_attrs2target_ds(ds, add_varname_attrs=False)
# Save to NetCDF
if save2NetCDF:
filename = 'Oi_prj_predicted_{}_{}{}.nc'.format(target, res, xsave_str)
ds.to_netcdf(filename)
else:
return ds
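# NOTE: a hypothetical usage sketch for mk_predictions_for_3D_features; the
# model name in models2compare is an illustrative assumption.
def _example_predict_3D_fields():
    """Sketch: predict a global field for one model and return the Dataset"""
    RFR_dict = build_or_get_models()
    ds = mk_predictions_for_3D_features(RFR_dict=RFR_dict, res='4x5',
                                        target='Iodide',
                                        models2compare=['RFR(TEMP+DEPTH+SAL)'],
                                        save2NetCDF=False)
    return ds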
| 2.53125 | 3 |