content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (string, 1 class)
---|---|---|---|---|---|---|---|---|
import datetime
import os
import shutil
import subprocess
import urllib.request
from contextlib import closing
import numpy as np
import pandas as pd
import requests
import wbml.out
__all__ = [
"DependencyError",
"resource",
"dependency",
"asserted_dependency",
"split_df",
"data_path",
"date_to_decimal_year",
]
class DependencyError(AssertionError):
"""Exception raised in case of an erroneous dependency."""
def resource(target, url, post=False, **kw_args):
"""Specify a dependency on an online resource.
Further takes in keyword arguments that are passed to the appropriate method
from :mod:`requests` or :mod:`urllib`.
Args:
target (str): Target file.
url (str): Source URL.
post (bool, optional): Make a POST request instead of a GET request.
Only applicable if the URL starts with "http" or "https". Defaults
to `False`.
"""
if not os.path.exists(target):
with wbml.out.Section("Downloading file"):
wbml.out.kv("Source", url)
wbml.out.kv("Target", target)
# Ensure that all directories in the path exist.
make_dirs(target)
# If the URL starts with "ftp", use the :mod:`urllib` library.
if url.startswith("ftp"):
with closing(urllib.request.urlopen(url, **kw_args)) as r:
with open(target, "wb") as f:
shutil.copyfileobj(r, f)
# By default, use the :mod:`requests` library.
else:
request = requests.post if post else requests.get
with request(url, stream=True, **kw_args) as r:
with open(target, "wb") as f:
shutil.copyfileobj(r.raw, f)
def dependency(target, source, commands):
"""Specify a dependency that is generated from an existing file.
Args:
target (str): Target file.
source (str): Source file.
commands (list[str]): List of commands to generate target file.
"""
if not os.path.exists(target):
with wbml.out.Section("Generating file"):
wbml.out.kv("Source", source)
wbml.out.kv("Target", target)
# Check that the source exists.
if not os.path.exists(source):
raise DependencyError(
f'Source "{source}" asserted to exist, but it does not.'
)
# Save current working directory.
current_wd = os.getcwd()
# Ensure that all directories in the path exist.
make_dirs(target)
# Perform commands.
for command in commands:
wbml.out.out(command)
# Change working directory to directory of target file, run
# command, and restore working directory afterwards.
os.chdir(os.path.dirname(target))
subprocess.call(command, shell=True)
os.chdir(current_wd)
def asserted_dependency(target):
"""Specify a dependency that cannot be fetched.
Args:
target (str): Target file.
"""
if not os.path.exists(target):
raise DependencyError(
f'Dependency "{target}" is asserted to exist, '
f"but it does not, and it cannot be "
f"automatically fetched. Please put the file "
f"into place manually."
)
def make_dirs(path):
"""Make the directories in the path of a file.
Args:
        path (str): Path of a file.
"""
os.makedirs(os.path.dirname(path), exist_ok=True)
def data_path(*xs):
"""Get the path of a data file.
Args:
*xs (str): Parts of the path.
Returns:
str: Absolute path.
"""
return os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "data", *xs)
)
def split_df(df, index_range, columns, iloc=False):
"""Split a data frame by selecting from columns a particular range.
Args:
df (:class:`pd.DataFrame`): Data frame to split.
index_range (tuple): Tuple containing lower and upper limit of the
range to split the index by. If `index_range = (a, b)`, then
`[a, b)` is taken.
columns (list[object]): Columns to select.
        iloc (bool, optional): If `True`, interpret `index_range` as integer
            locations rather than index values. Defaults to `False`.
Returns:
tuple[:class:`pd.DataFrame`]: Selected rows from selected columns
and the remainder.
"""
if iloc:
inds = np.arange(df.shape[0])
rows = (inds >= index_range[0]) & (inds < index_range[1])
else:
rows = (df.index >= index_range[0]) & (df.index < index_range[1])
selected = pd.DataFrame([df[name][rows] for name in columns]).T
remainder = pd.DataFrame(
[df[name][~rows] for name in columns]
+ [df[name] for name in set(df.columns) - set(columns)]
).T
# Fix order of columns.
selected_inds = [i for i, c in enumerate(df.columns) if c in columns]
selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1)
remainder = remainder.reindex(df.columns, axis=1)
return selected, remainder
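# Illustrative sketch (not part of the original module): with an integer index,
# `split_df(df, (2, 4), ["y"], iloc=True)` returns rows 2-3 of column "y" as
# `selected`, while `remainder` keeps the untouched columns plus the rows of
# "y" outside the range, for example:
#
#     df = pd.DataFrame({"x": range(5), "y": range(5)})
#     selected, remainder = split_df(df, (2, 4), ["y"], iloc=True)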
def date_to_decimal_year(date, format=None):
"""Convert a date to decimal year.
Args:
        date (str or datetime.datetime): Date, either as a string (requires
            `format`) or as a date/datetime object.
format (str, optional): Format of the date if a conversion is needed.
Returns:
float: Decimal year corresponding to the date.
"""
if format:
date = datetime.datetime.strptime(date, format)
start = datetime.date(date.year, 1, 1).toordinal()
year_length = datetime.date(date.year + 1, 1, 1).toordinal() - start
# Account for subday time.
subday_time = 0
if hasattr(date, "hour"):
subday_time += date.hour / year_length / 24
if hasattr(date, "minute"):
subday_time += date.minute / year_length / 24 / 60
if hasattr(date, "second"):
subday_time += date.second / year_length / 24 / 60 / 60
return date.year + float(date.toordinal() - start) / year_length + subday_time
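if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: convert an
    # arbitrary example date string to a decimal year.
    print(date_to_decimal_year("2020-07-01", format="%Y-%m-%d"))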
| 31.176768 | 82 | 0.599708 | ["MIT"] | wesselb/wbml | wbml/data/data.py | 6,173 | Python |
#! /usr/bin/env python
# coding: utf-8
import sys
from sys import exit
import os
import socket
import requests
import smtplib
import ssl
import dns.resolver
""" Python script to monitor list of url (https/http/ns/mx)
and send mail if down"""
__author__ = "Benjamin Kittler"
__copyright__ = "Copyright 2021, KITTLER"
__credits__ = ["Benjamin Kittler"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Benjamin Kittler"
__email__ = "kittler @T. gmail. com"
__status__ = "integration"
"""
############################################################
# Please complete these variable before the first launch #
############################################################
"""
# mail provider : TO BE MODIFIED
smtp_address = 'smtp.gmail.com'
smtp_port = 465
# email address and password : TO BE MODIFIED
email_address = '[email protected]'
email_password = 'PASSWORD'
""" Python script to monitor list of url (https/http/ns/mx)
and send mail if down"""
def check(file_to_check, testmode, debug):
"""
    Open the file, read each line and build a dictionary of entries.
    For each entry, check the URL (HTTP/HTTPS) or resolve and then ping it (NS/MX entries).
    If a URL does not respond, send an alert email.
    Parameters
    ----------
    file_to_check : string
        Name of the file that contains the list of URLs to check
        and the email address to alert
    testmode : string
        0 by default, set to 1 when the script is launched in test mode:
        printing enabled and no mail sent
    debug : string
        0 by default, set to 1 when the script is launched in debug mode:
        more printing enabled and no mail sent
Returns
-------
None.
"""
try:
file = open(file_to_check, "r")
except:
exit('open file failed')
# lines contain all line of file
lines = file.readlines()
# close the file after read all lines
file.close()
# create dict of url
url_dict = {}
# add each element on dict
for line in lines:
# clean end of line contain \n
line = line.replace("\n", "")
# clean line contain multiple space
line = line.replace(" ", "\t")
# clean line contain multiple \t
line = line.replace("\t\t\t", "\t")
line = line.replace("\t\t", "\t")
# clean line contain http:// or https://
line = line.replace("http://", "")
line = line.replace("https://", "")
element = line.split("\t")
cle = element[0]
data = element[1]
url_dict[cle] = data
if debug == 1:
print("Url dict : \n", url_dict)
if testmode == 1:
print("Check :")
for url, mail in url_dict.items():
# check http or https entry
if "ns://" not in url and "mx://" not in url and "ping://" not in url:
availability = str(request_url(url))
# import pdb; pdb.set_trace()
if (availability == ("200") or (availability == "301")
or (availability == "302")):
request_url_result = "UP"
else:
request_url_result = "DOWN"
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# check ns entry
elif "ns://" in url:
request_url_result = ping_name(url, "NS")
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result NS :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# check mx entry
elif "mx://" in url:
request_url_result = ping_name(url, "MX")
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result MX :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# check ping entry
elif "ping://" in url:
url = url.replace("ping://", "")
request_url_result = ping_ip(url)
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result Ping :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# ignore entry
else:
if testmode == 1:
print("url : ", url, " -> mail : ", mail, "ignored")
exit()
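# Example watch-list file for `check` (a sketch inferred from the parsing
# above; the domains and addresses are placeholders). Each line holds a
# URL/host and the alert address, separated by a tab:
#
#     example.com         [email protected]
#     ns://example.org    [email protected]
#     mx://example.org    [email protected]
#     ping://192.0.2.10   [email protected]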
def request_url(url):
"""
    Send an HTTPS (then HTTP) request to the URL and return the status code.
    Parameters
    ----------
    url : string
        URL that must be checked
    Returns
    -------
    status_code : int
        HTTP status code ("404" is returned if both requests fail)
"""
try:
url = "https://" + format(url)
response = requests.head(url, allow_redirects=True, timeout=10)
except:
try:
url = "http://" + format(url)
response = requests.head(url, allow_redirects=True, timeout=10)
except:
return "404"
# print("Request failed")
if response.status_code:
return response.status_code
else:
return "404"
def ping_name(name, dns_type):
"""
    Resolve the name and ping the resulting host(s).
    Return the result of the ping.
    Parameters
    ----------
    name : string
        Name (host) that must be checked
    dns_type : string
        DNS record type: A, NS or MX
Returns
-------
status : String
Status result : UP or DOWN
"""
# clean name host
name = name.replace("ns://", "")
name = name.replace("mx://", "")
# make resolution
if dns_type == "A":
try:
addr1 = socket.gethostbyname_ex(name)
print("Resolution -> {}".format(addr1[2]))
name = addr1[2]
except:
print("Resolution failed")
# make resolution
if dns_type == "MX":
try:
answers = dns.resolver.resolve(name, 'MX')
for rdata in answers:
# import pdb; pdb.set_trace()
#print('Mail exchange:',rdata.exchange)
addr1 = socket.gethostbyname_ex(str(rdata.exchange))
#print("Resolution -> {}".format(addr1[2]))
name = addr1[2]
if ping_ip(name) == "UP":
return "UP"
return ping_ip(name)
except:
print("Resolution failed")
return "DOWN"
# make resolution
if dns_type == "NS":
try:
answers = dns.resolver.resolve(name, 'NS')
for rdata in answers:
#import pdb; pdb.set_trace()
#print('Mail exchange:',rdata.exchange)
addr1 = socket.gethostbyname_ex(str(rdata.target))
#print("Resolution -> {}".format(addr1[2]))
name = addr1[2]
for srv in name:
if ping_ip(srv) == "UP":
return "UP"
return ping_ip(name)
except:
print("Resolution failed")
return "DOWN"
def ping_ip(name):
"""
    Ping the given host.
    Return the result of the ping.
    Parameters
    ----------
    name : string
        IP address (or resolvable host name) to ping
Returns
-------
status : String
Status result : UP or DOWN
"""
try:
# import pdb; pdb.set_trace()
name = str(name).strip('[]')
name = str(name).strip("''")
hostname = format(name)
response = os.system("ping -c 1 " + hostname + " > /dev/null 2>&1")
# import pdb; pdb.set_trace()
if response == 0:
return "UP"
# print("Response ping : OK")
else:
return "DOWN"
# print("Response ping : KO")
except requests.ConnectionError:
return "DOWN"
# print("Response ping : failed to connect")
return "DOWN"
def alert_mail(email_receiver, service_status, url):
"""
Function to send email Alert
Parameters
----------
email_receiver : string
destination email for alert
service_status : string
service status
url : string
        URL concerned by the alert
Returns
-------
None.
"""
# create subject
service_status = "Subject:{}\n\n".format(service_status) + "Server :{} \n".format(url)
    # create connection
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_address, smtp_port, context=context) as server:
        # account login
server.login(email_address, email_password)
# sending mail
server.sendmail(email_address, email_receiver, service_status)
def main(argv, testmode, debug):
"""
    Print the file to open and launch the check of that file with the testmode / debug values.
    Parameters
    ----------
    argv : list
        Command-line arguments; the first element is the name of the file that
        contains the list of URLs to check and the email address to alert.
    testmode : string
        0 by default, set to 1 when the script is launched in test mode:
        printing enabled and no mail sent
    debug : string
        0 by default, set to 1 when the script is launched in debug mode:
        more printing enabled and no mail sent
Returns
-------
None.
"""
# print argument for verification
if testmode == 1:
print("Import file: {}".format(argv[0]))
file = str(argv[0])
# launch check file entry
check(file, testmode, debug)
if __name__ == "__main__":
"""
    Get arguments from the command line and fix the values:
    testmode :
        0 by default, set to 1 when the script is launched in test mode:
        printing enabled and no mail sent
    debug :
        0 by default, set to 1 when the script is launched in debug mode:
        more printing enabled and no mail sent
    Call main with the arguments.
    """
    # retrieve arguments, search for test mode and launch main
if "-t" in sys.argv:
testmode = 1
debug = 0
elif "--test" in sys.argv:
testmode = 1
debug = 0
elif "--debug" in sys.argv:
testmode = 1
debug = 1
else:
testmode = 0
debug = 0
matching = [cmd for cmd in sys.argv if ".txt" in cmd]
main(matching, testmode, debug)
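# Example invocations (sketch; assumes the watch list is saved as urls.txt --
# any *.txt argument is picked up by the matching logic above):
#
#     python monitor2mail.py urls.txt           # normal mode: alert mails are sent
#     python monitor2mail.py urls.txt -t        # test mode: results printed, no mail
#     python monitor2mail.py urls.txt --debug   # debug mode: verbose output, no mail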
| 29.096257 | 96 | 0.541996 | ["MIT"] | bkittler/monitor2mail | monitor2mail.py | 10,882 | Python |
from fuzzconfig import FuzzConfig
import nonrouting
import nets
import pytrellis
import re
import fuzzloops
jobs = [
{
"cfg": FuzzConfig(job="BANKREF8", family="ECP5", device="LFE5U-45F", ncl="empty.ncl",
tiles=["MIB_R71C3:BANKREF8"]),
"side": "B",
"pin": "R1"
},
]
def main():
pytrellis.load_database("../../../database")
for job in jobs:
cfg = job["cfg"]
side = job["side"]
pin = job["pin"]
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
cfg.ncl = "pio.v"
def get_substs(iomode, vcc, extracfg=None):
if iomode == "NONE":
iodir, type = "NONE", ""
else:
iodir, type = iomode.split("_", 1)
substs = {
"dir": iodir,
"io_type": type,
"loc": pin,
"extra_attrs": "",
"vcc": vcc
}
if extracfg is not None:
substs["extra_attrs"] = '(* {}="{}" *)'.format(extracfg[0], extracfg[1])
return substs
vcco_opts = {
"1V2": "OUTPUT_LVCMOS12",
"1V5": "OUTPUT_LVCMOS15",
"1V8": "OUTPUT_LVCMOS18",
"2V5": "OUTPUT_LVCMOS25",
"3V3": "OUTPUT_LVCMOS33",
"NONE": "INPUT_LVCMOS12",
}
nonrouting.fuzz_enum_setting(cfg, "BANK.VCCIO", list(sorted(vcco_opts.keys())),
lambda x: get_substs(iomode=vcco_opts[x], vcc=x.replace("V", ".") if x != "NONE" else "2.5"),
empty_bitfile)
if __name__ == "__main__":
main()
| 26.984127 | 130 | 0.468235 | ["ISC"] | AndresNavarro82/prjtrellis | fuzzers/ECP5/143-bankref8/fuzzer.py | 1,700 | Python |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import warnings
from copy import deepcopy
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug3D(object):
"""Test-time augmentation with multiple scales and flipping.
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple]): Image scales for resizing.
pts_scale_ratio (float | list[float]): Points scale ratios for
resizing.
flip (bool): Whether apply flip augmentation. Defaults to False.
flip_direction (str | list[str]): Flip augmentation directions
for images, options are "horizontal" and "vertical".
If flip_direction is list, multiple flip augmentations will
be applied. It has no effect when ``flip == False``.
Defaults to "horizontal".
pcd_horizontal_flip (bool): Whether apply horizontal flip augmentation
to point cloud. Defaults to True. Note that it works only when
'flip' is turned on.
pcd_vertical_flip (bool): Whether apply vertical flip augmentation
to point cloud. Defaults to True. Note that it works only when
'flip' is turned on.
"""
def __init__(self,
transforms,
img_scale,
pts_scale_ratio,
flip=False,
flip_direction='horizontal',
pcd_horizontal_flip=False,
pcd_vertical_flip=False):
self.transforms = Compose(transforms)
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.pts_scale_ratio = pts_scale_ratio \
            if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)]
assert mmcv.is_list_of(self.img_scale, tuple)
assert mmcv.is_list_of(self.pts_scale_ratio, float)
self.flip = flip
self.pcd_horizontal_flip = pcd_horizontal_flip
self.pcd_vertical_flip = pcd_vertical_flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip and not any([(t['type'] == 'RandomFlip3D'
or t['type'] == 'RandomFlip')
for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to augment common fields in results.
Args:
results (dict): Result dict contains the data to augment.
Returns:
dict: The result dict contains the data that is augmented with \
different scales and flips.
"""
aug_data = []
# modified from `flip_aug = [False, True] if self.flip else [False]`
# to reduce unnecessary scenes when using double flip augmentation
# during test time
flip_aug = [True] if self.flip else [False]
pcd_horizontal_flip_aug = [False, True] \
if self.flip and self.pcd_horizontal_flip else [False]
pcd_vertical_flip_aug = [False, True] \
if self.flip and self.pcd_vertical_flip else [False]
for scale in self.img_scale:
for pts_scale_ratio in self.pts_scale_ratio:
for flip in flip_aug:
for pcd_horizontal_flip in pcd_horizontal_flip_aug:
for pcd_vertical_flip in pcd_vertical_flip_aug:
for direction in self.flip_direction:
# results.copy will cause bug
# since it is shallow copy
_results = deepcopy(results)
_results['scale'] = scale
_results['flip'] = flip
_results['pcd_scale_factor'] = \
pts_scale_ratio
_results['flip_direction'] = direction
_results['pcd_horizontal_flip'] = \
pcd_horizontal_flip
_results['pcd_vertical_flip'] = \
pcd_vertical_flip
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
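# Illustrative config snippet (hypothetical values, not taken from this
# repository) showing how such a transform is typically wired into an
# mmdet3d-style test pipeline; the inner `transforms` list and the scales are
# placeholders:
#
#     test_pipeline = [
#         dict(
#             type='MultiScaleFlipAug3D',
#             img_scale=(1333, 800),
#             pts_scale_ratio=1.0,
#             flip=False,
#             transforms=[
#                 dict(type='DefaultFormatBundle3D', class_names=class_names),
#                 dict(type='Collect3D', keys=['points']),
#             ]),
#     ]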
| 45.033058 | 78 | 0.576436 | ["Apache-2.0"] | Comverser/mmdetection3d | mmdet3d/datasets/pipelines/test_time_aug.py | 5,449 | Python |
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'HiddenValley:ffbar2Zv = on',
'HiddenValley:Ngauge = 3',
'4900023:mWidth = 0.01',
'HiddenValley:pTminFSR = .1',
'HiddenValley:alphaFSR = .8',
'HiddenValley:FSR = on',
'HiddenValley:fragment = on',
'HiddenValley:probVector = 0',
'PartonLevel:MPI = on',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'HadronLevel:Hadronize = on',
'4900023:onMode = off',
'4900023:onIfAny = 4900101',
'4900023:m0 = 300', #Z' mass
'4900101:m0 = .5',
'4900111:m0 = 20', #Dark Pion Mass
'4900111:mayDecay = on',
'4900111:addChannel 1 1. 0 22 22', #force dark pion to decay to diphotons
'4900111:tau0 = 500', #Dark pion lifetime in mm
'4900211:mayDecay = off',
'-4900211:mayDecay = off'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| 44.065217 | 93 | 0.513567 | ["MIT"] | jking79/Timing | GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP300_MDP20_Ctau500mm_Pythia8_13TeV_cff.py | 2,027 | Python |
# code is based on https://github.com/katerakelly/pytorch-maml
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch
from torch.utils.data import DataLoader,Dataset
import random
import os
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data.sampler import Sampler
def imshow(img):
npimg = img.numpy()
plt.axis("off")
plt.imshow(np.transpose(npimg,(1,2,0)))
plt.show()
class Rotate(object):
def __init__(self, angle):
self.angle = angle
def __call__(self, x, mode="reflect"):
x = x.rotate(self.angle)
return x
def mini_imagenet_folders():
train_folder = './train'
test_folder = './test'
metatrain_folders = [os.path.join(train_folder, label) \
for label in os.listdir(train_folder) \
if os.path.isdir(os.path.join(train_folder, label)) \
]
metatest_folders = [os.path.join(test_folder, label) \
for label in os.listdir(test_folder) \
if os.path.isdir(os.path.join(test_folder, label)) \
]
random.seed(1)
random.shuffle(metatrain_folders)
random.shuffle(metatest_folders)
return metatrain_folders,metatest_folders
class MiniImagenetTask(object):
def __init__(self, character_folders, num_classes, train_num,test_num):
self.character_folders = character_folders
self.num_classes = num_classes
self.train_num = train_num
self.test_num = test_num
class_folders = random.sample(self.character_folders,self.num_classes)
labels = np.array(range(len(class_folders)))
labels = dict(zip(class_folders, labels))
samples = dict()
self.train_roots = []
self.test_roots = []
for c in class_folders:
temp = [os.path.join(c, x) for x in os.listdir(c)]
samples[c] = random.sample(temp, len(temp))
random.shuffle(samples[c])
self.train_roots += samples[c][:train_num]
self.test_roots += samples[c][train_num:train_num+test_num]
self.train_labels = [labels[self.get_class(x)] for x in self.train_roots]
self.test_labels = [labels[self.get_class(x)] for x in self.test_roots]
def get_class(self, sample):
return os.path.join(*sample.split('/')[:-1])
class FewShotDataset(Dataset):
def __init__(self, task, split='train', transform=None, target_transform=None):
self.transform = transform # Torch operations on the input image
self.target_transform = target_transform
self.task = task
self.split = split
self.image_roots = self.task.train_roots if self.split == 'train' else self.task.test_roots
self.labels = self.task.train_labels if self.split == 'train' else self.task.test_labels
def __len__(self):
return len(self.image_roots)
def __getitem__(self, idx):
raise NotImplementedError("This is an abstract class. Subclass this class for your particular dataset.")
class MiniImagenet(FewShotDataset):
def __init__(self, *args, **kwargs):
super(MiniImagenet, self).__init__(*args, **kwargs)
def __getitem__(self, idx):
image_root = self.image_roots[idx]
image = Image.open(image_root)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
label = self.labels[idx]
if self.target_transform is not None:
label = self.target_transform(label)
return image, label, image_root
class ClassBalancedSampler(Sampler):
''' Samples 'num_inst' examples each from 'num_cl' pools
of examples of size 'num_per_class' '''
def __init__(self, num_per_class, num_cl, num_inst,shuffle=True):
self.num_per_class = num_per_class
self.num_cl = num_cl
self.num_inst = num_inst
self.shuffle = shuffle
def __iter__(self):
# return a single list of indices, assuming that items will be grouped by class
if self.shuffle:
batch = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)]
else:
batch = [[i+j*self.num_inst for i in range(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)]
batch = [item for sublist in batch for item in sublist]
if self.shuffle:
random.shuffle(batch)
return iter(batch)
def __len__(self):
return 1
def get_mini_imagenet_data_loader(task, num_per_class=1, split='train',shuffle = False):
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
dataset = MiniImagenet(task,split=split,transform=transforms.Compose([transforms.ToTensor(),normalize]))
if split == 'train':
sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.train_num,shuffle=shuffle)
else:
sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.test_num,shuffle=shuffle)
loader = DataLoader(dataset, batch_size=num_per_class*task.num_classes, sampler=sampler)
return loader
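# Usage sketch (not part of the original module; assumes ./train and ./test
# each contain one sub-directory of images per class):
#
#     metatrain_folders, metatest_folders = mini_imagenet_folders()
#     task = MiniImagenetTask(metatrain_folders, num_classes=5, train_num=1, test_num=15)
#     support_loader = get_mini_imagenet_data_loader(task, num_per_class=1, split='train')
#     query_loader = get_mini_imagenet_data_loader(task, num_per_class=15, split='test', shuffle=True)
#     for images, labels, paths in support_loader:
#         pass  # one 5-way 1-shot support batch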
| 34.761589 | 129 | 0.668508 | ["MIT"] | WendyBaiYunwei/FSL | miniimgnet/KD-gan/task_generator.py | 5,249 | Python |
# coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import seldon_deploy_sdk
from seldon_deploy_sdk.models.cinder_volume_source import CinderVolumeSource # noqa: E501
from seldon_deploy_sdk.rest import ApiException
class TestCinderVolumeSource(unittest.TestCase):
"""CinderVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCinderVolumeSource(self):
"""Test CinderVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = seldon_deploy_sdk.models.cinder_volume_source.CinderVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.756098 | 122 | 0.730049 | ["Apache-2.0"] | RafalSkolasinski/seldon-deploy-client | python/test/test_cinder_volume_source.py | 1,015 | Python |
import whois
def get_whois(domain):
try:
query = whois.query(domain)
assert isinstance(query, whois._3_adjust.Domain)
return query.__dict__
except:
pass
return None
def get_scans(domain):
url = "http://" + domain
urls = [url]
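    # NOTE: `vt` is not defined or imported in this module; it is presumably a
    # VirusTotal API client object (configured elsewhere) that exposes
    # `get_url_reports`. Without it, this function raises a NameError.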
scans = vt.get_url_reports([url])[url]['scans']
positive, negative = [], []
for key, val in scans.items():
if val["detected"]:
negative.append(key)
else:
positive.append(key)
return positive, negative, len(positive), len(negative)
if __name__ == '__main__':
# print('test domain: microsoft.com')
# print(get_whois('microsoft.com'))
# print(get_scans('pxxfmjhosgqqs.com'))
    pass
| 22.90625 | 59 | 0.601637 | ["MIT"] | sudo-rushil/CNN-LSTM_Domain_Classifier | intel_query.py | 733 | Python |
# Generated file, please do not change!!!
import typing
from ...models.error import ErrorResponse
from ...models.shipping_method import ShippingMethodPagedQueryResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyShippingMethodsMatchingCartRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def get(
self,
*,
cart_id: str,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["ShippingMethodPagedQueryResponse"]:
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/shipping-methods/matching-cart",
params={"cartId": cart_id, "expand": expand},
headers=headers,
options=options,
)
if response.status_code == 200:
return ShippingMethodPagedQueryResponse.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
raise ValueError("Unhandled status code %s", response.status_code)
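# Usage sketch (hypothetical values; `client` must be an already configured
# BaseClient instance):
#
#     builder = ByProjectKeyShippingMethodsMatchingCartRequestBuilder(
#         project_key="my-project", client=client)
#     shipping_methods = builder.get(cart_id="<cart-id>")
#     # -> ShippingMethodPagedQueryResponse, or None if the cart was not found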
| 32.510638 | 80 | 0.643979 | ["MIT"] | lime-green/commercetools-python-sdk | src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py | 1,528 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class SubResource(Model):
"""Reference to another subresource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar type: Resource type
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SubResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AgentPool(SubResource):
"""Agent Pool.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param count: Required. Number of agents (VMs) to host docker containers.
Allowed values must be in the range of 1 to 100 (inclusive). The default
value is 1. . Default value: 1 .
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk
size for every machine in this master/agent pool. If you specify 0, it
will apply the default osDisk size according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux
and Windows. Default to Linux. Possible values include: 'Linux',
'Windows'. Default value: "Linux" .
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler
:type enable_auto_scaling: bool
:param agent_pool_type: AgentPoolType represents types of an agent pool.
Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
:type agent_pool_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolType
:param orchestrator_version: Version of orchestrator specified when
creating the managed cluster.
:type orchestrator_version: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param availability_zones: (PREVIEW) Availability zones for nodes. Must
use VirtualMachineScaleSets AgentPoolType.
:type availability_zones: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'count': {'key': 'properties.count', 'type': 'int'},
'vm_size': {'key': 'properties.vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'properties.osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'properties.vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'properties.maxPods', 'type': 'int'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'max_count': {'key': 'properties.maxCount', 'type': 'int'},
'min_count': {'key': 'properties.minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'properties.enableAutoScaling', 'type': 'bool'},
'agent_pool_type': {'key': 'properties.type', 'type': 'str'},
'orchestrator_version': {'key': 'properties.orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'availability_zones': {'key': 'properties.availabilityZones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(AgentPool, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.max_pods = kwargs.get('max_pods', None)
self.os_type = kwargs.get('os_type', "Linux")
self.max_count = kwargs.get('max_count', None)
self.min_count = kwargs.get('min_count', None)
self.enable_auto_scaling = kwargs.get('enable_auto_scaling', None)
self.agent_pool_type = kwargs.get('agent_pool_type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
self.provisioning_state = None
self.availability_zones = kwargs.get('availability_zones', None)
class CloudError(Model):
"""An error response from the Container service.
:param error: Details about the error.
:type error:
~azure.mgmt.containerservice.v2019_02_01.models.CloudErrorBody
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'CloudErrorBody'},
}
def __init__(self, **kwargs):
super(CloudError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class CloudErrorException(HttpOperationError):
"""Server responsed with exception of type: 'CloudError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args)
class CloudErrorBody(Model):
"""An error response from the Container service.
:param code: An identifier for the error. Codes are invariant and are
intended to be consumed programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable
for display in a user interface.
:type message: str
:param target: The target of the particular error. For example, the name
of the property in error.
:type target: str
:param details: A list of additional details about the error.
:type details:
list[~azure.mgmt.containerservice.v2019_02_01.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(self, **kwargs):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class ContainerServiceDiagnosticsProfile(Model):
"""Profile for diagnostics on the container service cluster.
All required parameters must be populated in order to send to Azure.
:param vm_diagnostics: Required. Profile for diagnostics on the container
service VMs.
:type vm_diagnostics:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMDiagnostics
"""
_validation = {
'vm_diagnostics': {'required': True},
}
_attribute_map = {
'vm_diagnostics': {'key': 'vmDiagnostics', 'type': 'ContainerServiceVMDiagnostics'},
}
def __init__(self, **kwargs):
super(ContainerServiceDiagnosticsProfile, self).__init__(**kwargs)
self.vm_diagnostics = kwargs.get('vm_diagnostics', None)
class ContainerServiceLinuxProfile(Model):
"""Profile for Linux VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:param admin_username: Required. The administrator username to use for
Linux VMs.
:type admin_username: str
:param ssh: Required. SSH configuration for Linux-based VMs running on
Azure.
:type ssh:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceSshConfiguration
"""
_validation = {
'admin_username': {'required': True, 'pattern': r'^[A-Za-z][-A-Za-z0-9_]*$'},
'ssh': {'required': True},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'ssh': {'key': 'ssh', 'type': 'ContainerServiceSshConfiguration'},
}
def __init__(self, **kwargs):
super(ContainerServiceLinuxProfile, self).__init__(**kwargs)
self.admin_username = kwargs.get('admin_username', None)
self.ssh = kwargs.get('ssh', None)
class ContainerServiceMasterProfile(Model):
"""Profile for the container service master.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param count: Number of masters (VMs) in the container service cluster.
Allowed values are 1, 3, and 5. The default value is 1. Default value: 1 .
:type count: int
:param dns_prefix: Required. DNS prefix to be used to create the FQDN for
the master pool.
:type dns_prefix: str
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk
size for every machine in this master/agent pool. If you specify 0, it
will apply the default osDisk size according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param first_consecutive_static_ip: FirstConsecutiveStaticIP used to
specify the first static ip of masters. Default value: "10.240.255.5" .
:type first_consecutive_static_ip: str
:param storage_profile: Storage profile specifies what kind of storage
used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will
choose for you based on the orchestrator choice. Possible values include:
'StorageAccount', 'ManagedDisks'
:type storage_profile: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceStorageProfileTypes
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
"""
_validation = {
'dns_prefix': {'required': True},
'vm_size': {'required': True},
'fqdn': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'dns_prefix': {'key': 'dnsPrefix', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'first_consecutive_static_ip': {'key': 'firstConsecutiveStaticIP', 'type': 'str'},
'storage_profile': {'key': 'storageProfile', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceMasterProfile, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.dns_prefix = kwargs.get('dns_prefix', None)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.first_consecutive_static_ip = kwargs.get('first_consecutive_static_ip', "10.240.255.5")
self.storage_profile = kwargs.get('storage_profile', None)
self.fqdn = None
class ContainerServiceNetworkProfile(Model):
"""Profile of network configuration.
:param network_plugin: Network plugin used for building Kubernetes
network. Possible values include: 'azure', 'kubenet'. Default value:
"kubenet" .
:type network_plugin: str or
~azure.mgmt.containerservice.v2019_02_01.models.NetworkPlugin
:param network_policy: Network policy used for building Kubernetes
network. Possible values include: 'calico', 'azure'
:type network_policy: str or
~azure.mgmt.containerservice.v2019_02_01.models.NetworkPolicy
:param pod_cidr: A CIDR notation IP range from which to assign pod IPs
when kubenet is used. Default value: "10.244.0.0/16" .
:type pod_cidr: str
:param service_cidr: A CIDR notation IP range from which to assign service
cluster IPs. It must not overlap with any Subnet IP ranges. Default value:
"10.0.0.0/16" .
:type service_cidr: str
:param dns_service_ip: An IP address assigned to the Kubernetes DNS
service. It must be within the Kubernetes service address range specified
in serviceCidr. Default value: "10.0.0.10" .
:type dns_service_ip: str
:param docker_bridge_cidr: A CIDR notation IP range assigned to the Docker
bridge network. It must not overlap with any Subnet IP ranges or the
Kubernetes service address range. Default value: "172.17.0.1/16" .
:type docker_bridge_cidr: str
"""
_validation = {
'pod_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'service_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'dns_service_ip': {'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'},
'docker_bridge_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
}
_attribute_map = {
'network_plugin': {'key': 'networkPlugin', 'type': 'str'},
'network_policy': {'key': 'networkPolicy', 'type': 'str'},
'pod_cidr': {'key': 'podCidr', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'dns_service_ip': {'key': 'dnsServiceIP', 'type': 'str'},
'docker_bridge_cidr': {'key': 'dockerBridgeCidr', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceNetworkProfile, self).__init__(**kwargs)
self.network_plugin = kwargs.get('network_plugin', "kubenet")
self.network_policy = kwargs.get('network_policy', None)
self.pod_cidr = kwargs.get('pod_cidr', "10.244.0.0/16")
self.service_cidr = kwargs.get('service_cidr', "10.0.0.0/16")
self.dns_service_ip = kwargs.get('dns_service_ip', "10.0.0.10")
self.docker_bridge_cidr = kwargs.get('docker_bridge_cidr', "172.17.0.1/16")
class ContainerServiceSshConfiguration(Model):
"""SSH configuration for Linux-based VMs running on Azure.
All required parameters must be populated in order to send to Azure.
:param public_keys: Required. The list of SSH public keys used to
authenticate with Linux-based VMs. Only expect one key specified.
:type public_keys:
list[~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceSshPublicKey]
"""
_validation = {
'public_keys': {'required': True},
}
_attribute_map = {
'public_keys': {'key': 'publicKeys', 'type': '[ContainerServiceSshPublicKey]'},
}
def __init__(self, **kwargs):
super(ContainerServiceSshConfiguration, self).__init__(**kwargs)
self.public_keys = kwargs.get('public_keys', None)
class ContainerServiceSshPublicKey(Model):
"""Contains information about SSH certificate public key data.
All required parameters must be populated in order to send to Azure.
:param key_data: Required. Certificate public key used to authenticate
with VMs through SSH. The certificate must be in PEM format with or
without headers.
:type key_data: str
"""
_validation = {
'key_data': {'required': True},
}
_attribute_map = {
'key_data': {'key': 'keyData', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceSshPublicKey, self).__init__(**kwargs)
self.key_data = kwargs.get('key_data', None)
class ContainerServiceVMDiagnostics(Model):
"""Profile for diagnostics on the container service VMs.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the VM diagnostic agent is provisioned
on the VM.
:type enabled: bool
:ivar storage_uri: The URI of the storage account where diagnostics are
stored.
:vartype storage_uri: str
"""
_validation = {
'enabled': {'required': True},
'storage_uri': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'storage_uri': {'key': 'storageUri', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceVMDiagnostics, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.storage_uri = None
class ContainerServiceWindowsProfile(Model):
"""Profile for Windows VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:param admin_username: Required. The administrator username to use for
Windows VMs.
:type admin_username: str
:param admin_password: Required. The administrator password to use for
Windows VMs.
:type admin_password: str
"""
_validation = {
'admin_username': {'required': True, 'pattern': r'^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$'},
'admin_password': {'required': True, 'pattern': r'^(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%\^&\*\(\)])[a-zA-Z\d!@#$%\^&\*\(\)]{12,123}$'},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceWindowsProfile, self).__init__(**kwargs)
self.admin_username = kwargs.get('admin_username', None)
self.admin_password = kwargs.get('admin_password', None)
class CredentialResult(Model):
"""The credential result response.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The name of the credential.
:vartype name: str
:ivar value: Base64-encoded Kubernetes configuration file.
:vartype value: bytearray
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(CredentialResult, self).__init__(**kwargs)
self.name = None
self.value = None
class CredentialResults(Model):
"""The list of credential result response.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar kubeconfigs: Base64-encoded Kubernetes configuration file.
:vartype kubeconfigs:
list[~azure.mgmt.containerservice.v2019_02_01.models.CredentialResult]
"""
_validation = {
'kubeconfigs': {'readonly': True},
}
_attribute_map = {
'kubeconfigs': {'key': 'kubeconfigs', 'type': '[CredentialResult]'},
}
def __init__(self, **kwargs):
super(CredentialResults, self).__init__(**kwargs)
self.kubeconfigs = None
class Resource(Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ManagedCluster(Resource):
"""Managed cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param kubernetes_version: Version of Kubernetes specified when creating
the managed cluster.
:type kubernetes_version: str
:param dns_prefix: DNS prefix specified when creating the managed cluster.
:type dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:param agent_pool_profiles: Properties of the agent pool.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterAgentPoolProfile]
:param linux_profile: Profile for Linux VMs in the container service
cluster.
:type linux_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceLinuxProfile
:param service_principal_profile: Information about a service principal
identity for the cluster to use for manipulating Azure APIs.
:type service_principal_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterServicePrincipalProfile
:param addon_profiles: Profile of managed cluster add-on.
:type addon_profiles: dict[str,
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterAddonProfile]
:ivar node_resource_group: Name of the resource group containing agent
pool nodes.
:vartype node_resource_group: str
:param enable_rbac: Whether to enable Kubernetes Role-Based Access
Control.
:type enable_rbac: bool
:param enable_pod_security_policy: (DEPRECATING) Whether to enable
Kubernetes pod security policy (preview). This feature is set for removal
on October 15th, 2020. Learn more at aka.ms/aks/azpodpolicy.
:type enable_pod_security_policy: bool
:param network_profile: Profile of network configuration.
:type network_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceNetworkProfile
:param aad_profile: Profile of Azure Active Directory configuration.
:type aad_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterAADProfile
:param api_server_authorized_ip_ranges: (PREVIEW) Authorized IP Ranges to
kubernetes API server.
:type api_server_authorized_ip_ranges: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'fqdn': {'readonly': True},
'node_resource_group': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'dns_prefix': {'key': 'properties.dnsPrefix', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterAgentPoolProfile]'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ManagedClusterServicePrincipalProfile'},
'addon_profiles': {'key': 'properties.addonProfiles', 'type': '{ManagedClusterAddonProfile}'},
'node_resource_group': {'key': 'properties.nodeResourceGroup', 'type': 'str'},
'enable_rbac': {'key': 'properties.enableRBAC', 'type': 'bool'},
'enable_pod_security_policy': {'key': 'properties.enablePodSecurityPolicy', 'type': 'bool'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerServiceNetworkProfile'},
'aad_profile': {'key': 'properties.aadProfile', 'type': 'ManagedClusterAADProfile'},
'api_server_authorized_ip_ranges': {'key': 'properties.apiServerAuthorizedIPRanges', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedCluster, self).__init__(**kwargs)
self.provisioning_state = None
self.kubernetes_version = kwargs.get('kubernetes_version', None)
self.dns_prefix = kwargs.get('dns_prefix', None)
self.fqdn = None
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
self.linux_profile = kwargs.get('linux_profile', None)
self.service_principal_profile = kwargs.get('service_principal_profile', None)
self.addon_profiles = kwargs.get('addon_profiles', None)
self.node_resource_group = None
self.enable_rbac = kwargs.get('enable_rbac', None)
self.enable_pod_security_policy = kwargs.get('enable_pod_security_policy', None)
self.network_profile = kwargs.get('network_profile', None)
self.aad_profile = kwargs.get('aad_profile', None)
self.api_server_authorized_ip_ranges = kwargs.get('api_server_authorized_ip_ranges', None)
class ManagedClusterAADProfile(Model):
"""AADProfile specifies attributes for Azure Active Directory integration.
All required parameters must be populated in order to send to Azure.
:param client_app_id: Required. The client AAD application ID.
:type client_app_id: str
:param server_app_id: Required. The server AAD application ID.
:type server_app_id: str
:param server_app_secret: The server AAD application secret.
:type server_app_secret: str
:param tenant_id: The AAD tenant ID to use for authentication. If not
specified, will use the tenant of the deployment subscription.
:type tenant_id: str
"""
_validation = {
'client_app_id': {'required': True},
'server_app_id': {'required': True},
}
_attribute_map = {
'client_app_id': {'key': 'clientAppID', 'type': 'str'},
'server_app_id': {'key': 'serverAppID', 'type': 'str'},
'server_app_secret': {'key': 'serverAppSecret', 'type': 'str'},
'tenant_id': {'key': 'tenantID', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterAADProfile, self).__init__(**kwargs)
self.client_app_id = kwargs.get('client_app_id', None)
self.server_app_id = kwargs.get('server_app_id', None)
self.server_app_secret = kwargs.get('server_app_secret', None)
self.tenant_id = kwargs.get('tenant_id', None)
class ManagedClusterAccessProfile(Resource):
"""Managed cluster Access Profile.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param kube_config: Base64-encoded Kubernetes configuration file.
:type kube_config: bytearray
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kube_config': {'key': 'properties.kubeConfig', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(ManagedClusterAccessProfile, self).__init__(**kwargs)
self.kube_config = kwargs.get('kube_config', None)
class ManagedClusterAddonProfile(Model):
"""A Kubernetes add-on profile for a managed cluster.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the add-on is enabled or not.
:type enabled: bool
:param config: Key-value pairs for configuring an add-on.
:type config: dict[str, str]
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'config': {'key': 'config', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ManagedClusterAddonProfile, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.config = kwargs.get('config', None)
class ManagedClusterAgentPoolProfileProperties(Model):
"""Properties for the container service agent pool profile.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param count: Required. Number of agents (VMs) to host docker containers.
     Allowed values must be in the range of 1 to 100 (inclusive). Default
     value: 1.
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk
size for every machine in this master/agent pool. If you specify 0, it
will apply the default osDisk size according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux
and Windows. Default to Linux. Possible values include: 'Linux',
'Windows'. Default value: "Linux" .
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler
:type enable_auto_scaling: bool
:param type: AgentPoolType represents types of an agent pool. Possible
values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
:type type: str or
~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolType
:param orchestrator_version: Version of orchestrator specified when
creating the managed cluster.
:type orchestrator_version: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param availability_zones: (PREVIEW) Availability zones for nodes. Must
use VirtualMachineScaleSets AgentPoolType.
:type availability_zones: list[str]
"""
_validation = {
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedClusterAgentPoolProfileProperties, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.max_pods = kwargs.get('max_pods', None)
self.os_type = kwargs.get('os_type', "Linux")
self.max_count = kwargs.get('max_count', None)
self.min_count = kwargs.get('min_count', None)
self.enable_auto_scaling = kwargs.get('enable_auto_scaling', None)
self.type = kwargs.get('type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
self.provisioning_state = None
self.availability_zones = kwargs.get('availability_zones', None)
class ManagedClusterAgentPoolProfile(ManagedClusterAgentPoolProfileProperties):
"""Profile for the container service agent pool.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param count: Required. Number of agents (VMs) to host docker containers.
     Allowed values must be in the range of 1 to 100 (inclusive). Default
     value: 1.
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk
size for every machine in this master/agent pool. If you specify 0, it
will apply the default osDisk size according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux
and Windows. Default to Linux. Possible values include: 'Linux',
'Windows'. Default value: "Linux" .
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler
:type enable_auto_scaling: bool
:param type: AgentPoolType represents types of an agent pool. Possible
values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
:type type: str or
~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolType
:param orchestrator_version: Version of orchestrator specified when
creating the managed cluster.
:type orchestrator_version: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param availability_zones: (PREVIEW) Availability zones for nodes. Must
use VirtualMachineScaleSets AgentPoolType.
:type availability_zones: list[str]
:param name: Required. Unique name of the agent pool profile in the
context of the subscription and resource group.
:type name: str
"""
_validation = {
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
'name': {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterAgentPoolProfile, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class ManagedClusterPoolUpgradeProfile(Model):
"""The list of available upgrade versions.
All required parameters must be populated in order to send to Azure.
:param kubernetes_version: Required. Kubernetes version (major, minor,
patch).
:type kubernetes_version: str
:param name: Pool name.
:type name: str
:param os_type: Required. OsType to be used to specify os type. Choose
from Linux and Windows. Default to Linux. Possible values include:
'Linux', 'Windows'. Default value: "Linux" .
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param upgrades: List of orchestrator types and versions available for
upgrade.
:type upgrades: list[str]
"""
_validation = {
'kubernetes_version': {'required': True},
'os_type': {'required': True},
}
_attribute_map = {
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'upgrades': {'key': 'upgrades', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedClusterPoolUpgradeProfile, self).__init__(**kwargs)
self.kubernetes_version = kwargs.get('kubernetes_version', None)
self.name = kwargs.get('name', None)
self.os_type = kwargs.get('os_type', "Linux")
self.upgrades = kwargs.get('upgrades', None)
class ManagedClusterServicePrincipalProfile(Model):
"""Information about a service principal identity for the cluster to use for
manipulating Azure APIs.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. The ID for the service principal.
:type client_id: str
:param secret: The secret password associated with the service principal
in plain text.
:type secret: str
"""
_validation = {
'client_id': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterServicePrincipalProfile, self).__init__(**kwargs)
self.client_id = kwargs.get('client_id', None)
self.secret = kwargs.get('secret', None)
class ManagedClusterUpgradeProfile(Model):
"""The list of available upgrades for compute pools.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of upgrade profile.
:vartype id: str
:ivar name: Name of upgrade profile.
:vartype name: str
:ivar type: Type of upgrade profile.
:vartype type: str
:param control_plane_profile: Required. The list of available upgrade
versions for the control plane.
:type control_plane_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterPoolUpgradeProfile
:param agent_pool_profiles: Required. The list of available upgrade
versions for agent pools.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterPoolUpgradeProfile]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'control_plane_profile': {'required': True},
'agent_pool_profiles': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'control_plane_profile': {'key': 'properties.controlPlaneProfile', 'type': 'ManagedClusterPoolUpgradeProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterPoolUpgradeProfile]'},
}
def __init__(self, **kwargs):
super(ManagedClusterUpgradeProfile, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.control_plane_profile = kwargs.get('control_plane_profile', None)
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
class OperationValue(Model):
"""Describes the properties of a Compute Operation value.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar origin: The origin of the compute operation.
:vartype origin: str
:ivar name: The name of the compute operation.
:vartype name: str
:ivar operation: The display name of the compute operation.
:vartype operation: str
:ivar resource: The display name of the resource the operation applies to.
:vartype resource: str
:ivar description: The description of the operation.
:vartype description: str
:ivar provider: The resource provider for the operation.
:vartype provider: str
"""
_validation = {
'origin': {'readonly': True},
'name': {'readonly': True},
'operation': {'readonly': True},
'resource': {'readonly': True},
'description': {'readonly': True},
'provider': {'readonly': True},
}
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'operation': {'key': 'display.operation', 'type': 'str'},
'resource': {'key': 'display.resource', 'type': 'str'},
'description': {'key': 'display.description', 'type': 'str'},
'provider': {'key': 'display.provider', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationValue, self).__init__(**kwargs)
self.origin = None
self.name = None
self.operation = None
self.resource = None
self.description = None
self.provider = None
class OrchestratorProfile(Model):
"""Contains information about orchestrator.
All required parameters must be populated in order to send to Azure.
:param orchestrator_type: Required. Orchestrator type.
:type orchestrator_type: str
:param orchestrator_version: Required. Orchestrator version (major, minor,
patch).
:type orchestrator_version: str
"""
_validation = {
'orchestrator_type': {'required': True},
'orchestrator_version': {'required': True},
}
_attribute_map = {
'orchestrator_type': {'key': 'orchestratorType', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OrchestratorProfile, self).__init__(**kwargs)
self.orchestrator_type = kwargs.get('orchestrator_type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
class TagsObject(Model):
"""Tags object for patch operations.
:param tags: Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(TagsObject, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
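# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated SDK module): building a
# request payload from the models above. The location, DNS prefix, Kubernetes
# version and pool name are hypothetical example values; the VM size comes
# from the ContainerServiceVMSizeTypes list documented on the class.
if __name__ == '__main__':
    example_pool = ManagedClusterAgentPoolProfile(
        name='nodepool1',           # must match r'^[a-z][a-z0-9]{0,11}$'
        count=3,                    # validated to the range 1..100
        vm_size='Standard_DS2_v2',
    )
    example_cluster = ManagedCluster(
        location='eastus',          # 'location' is the only required field here
        dns_prefix='example-dns',
        kubernetes_version='1.12.7',
        agent_pool_profiles=[example_pool],
        enable_rbac=True,
    )
    # Read-only attributes (fqdn, provisioning_state, ...) stay None until the
    # object is populated from a server response.
    print('provisioning_state=%r' % example_cluster.provisioning_state)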
| 43.776818 | 139 | 0.670526 | [
"Unlicense",
"MIT"
] | amcclead7336/Enterprise_Data_Science_Final | venv/lib/python3.8/site-packages/azure/mgmt/containerservice/v2019_02_01/models/_models.py | 60,806 | Python |
import pandas as pd
from nibabel.tmpdirs import InTemporaryDirectory
from nose.tools import (assert_raises,
assert_true,
)
from nistats.utils import _check_events_file_uses_tab_separators
def make_data_for_test_runs():
data_for_temp_datafile = [
['csf', 'constant', 'linearTrend', 'wm'],
[13343.032102491035, 1.0, 0.0, 9486.199545677482],
[13329.224068063204, 1.0, 1.0, 9497.003324892803],
[13291.755627241291, 1.0, 2.0, 9484.012965365506],
]
delimiters = {
'tab': '\t',
'comma': ',',
'space': ' ',
'semicolon': ';',
'hyphen': '-',
}
return data_for_temp_datafile, delimiters
def _create_test_file(temp_csv, test_data, delimiter):
test_data = pd.DataFrame(test_data)
test_data.to_csv(temp_csv, sep=delimiter)
def _run_test_for_invalid_separator(filepath, delimiter_name):
if delimiter_name not in ('tab', 'comma'):
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(events_files=filepath)
else:
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert_true(result is None)
def test_for_invalid_separator():
data_for_temp_datafile, delimiters = make_data_for_test_runs()
for delimiter_name, delimiter_char in delimiters.items():
with InTemporaryDirectory():
temp_tsv_file = 'tempfile.{} separated values'.format(
delimiter_name)
_create_test_file(temp_csv=temp_tsv_file ,
test_data=data_for_temp_datafile,
delimiter=delimiter_char)
_run_test_for_invalid_separator(filepath=temp_tsv_file ,
delimiter_name=delimiter_name)
def test_with_2D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
events_pandas_dataframe = pd.DataFrame(data_for_pandas_dataframe)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_with_1D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
for dataframe_ in data_for_pandas_dataframe:
events_pandas_dataframe = pd.DataFrame(dataframe_)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_for_invalid_filepath():
filepath = 'junk_file_path.csv'
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert_true(result is None)
def test_for_pandas_dataframe():
events_pandas_dataframe = pd.DataFrame([['a', 'b', 'c'], [0, 1, 2]])
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_binary_opening_an_image():
img_data = bytearray(
b'GIF87a\x01\x00\x01\x00\xe7*\x00\x00\x00\x00\x01\x01\x01\x02\x02'
b'\x07\x08\x08\x08\x0b\x0b\x0b\x0c\x0c\x0c\r;')
with InTemporaryDirectory():
temp_img_file = 'temp_img.gif'
with open(temp_img_file, 'wb') as temp_img_obj:
temp_img_obj.write(img_data)
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(
events_files=temp_img_file)
def test_binary_bytearray_of_ints_data():
temp_data_bytearray_from_ints = bytearray([0, 1, 0, 11, 10])
with InTemporaryDirectory():
temp_bin_file = 'temp_bin.bin'
with open(temp_bin_file, 'wb') as temp_bin_obj:
temp_bin_obj.write(temp_data_bytearray_from_ints)
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(
events_files=temp_bin_file)
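def _example_valid_events_file():
    """Illustrative sketch (not part of the original test module).
    Shows the kind of input the checker accepts without raising: an events
    table written with tab separators. The column names are hypothetical.
    """
    events = pd.DataFrame({'onset': [0.0, 2.5],
                           'duration': [1.0, 1.0],
                           'trial_type': ['a', 'b']})
    with InTemporaryDirectory():
        events.to_csv('events.tsv', sep='\t', index=False)
        # Returns None (i.e. passes silently) for tab-separated files.
        return _check_events_file_uses_tab_separators(events_files='events.tsv')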
if __name__ == '__main__':
def _run_tests_print_test_messages(test_func):
from pprint import pprint
pprint(['Running', test_func.__name__])
test_func()
pprint('... complete')
def run_test_suite():
tests = [
test_for_invalid_filepath,
test_with_2D_dataframe,
test_with_1D_dataframe,
test_for_invalid_filepath,
test_for_pandas_dataframe,
test_binary_opening_an_image,
test_binary_bytearray_of_ints_data,
]
for test_ in tests:
_run_tests_print_test_messages(test_func=test_)
run_test_suite()
| 33.977612 | 78 | 0.664617 | [
"BSD-3-Clause"
] | bthirion/nistats | nistats/tests/test_check_events_file_uses_tab_separators.py | 4,553 | Python |
from django import forms
from django.http import QueryDict
from django.forms.formsets import formset_factory
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import date
import itertools
import re
from fields import SubmitButtonField, SubmitButtonWidget
class Filter(object):
__metaclass__ = ABCMeta
_order = itertools.count()
form_field_class = None
form_field_widget = None
filter_state_names = ['%s', ]
filter_field = ''
def __init__(self,
default=None,
required=False,
label=None,
form_field_class=None,
form_field_widget=None,
filter_set=False,
filter_field=None):
self.default = default
self.required = required
self.label = label
self.form_field_class = form_field_class or self.form_field_class
self.form_field_widget = form_field_widget or self.form_field_widget
self.order = Filter._order.next()
self.filter_set = filter_set
self.filter_field = filter_field or self.filter_field
def get_form_field(self):
"""
Returns an instance of the form field class, used for constructing the
filter form for a report.
"""
return self.form_field_class(required=(self.required and not self.filter_set),
widget=self.form_field_widget,
label=self.label)
def get_form_class(self, name, index=0, postfix="Form"):
form_class_name = "%s%s" % (type(self).__name__, postfix)
form_class_dict = {name: self.get_form_field()}
return type(form_class_name, (forms.Form,), form_class_dict)
def clean_data(self, name, raw_data):
form = self.get_form_class(name)(data=raw_data)
return form.cleaned_data[name] if form.is_valid() else None
def get_data(self, name, data):
"""
To get the data for this filter given the filter sets, we instantiate
the form with the data, validate it, and return the cleaned data.
"""
cleaned_data = self.clean_data(name, data)
return cleaned_data if cleaned_data else self.default
def get_data_set(self, name, data):
"""
This horribly ugly little function is in charge of returning a list of
data entries, given filter states, for a filter set. It does the same
thing as get_data, but for every item in a filter set, returning the
results in a list.
"""
# If we're not really a set, just return a 1-element list with the data
if not self.filter_set:
return [self.get_data(name, data)]
# Get the deletion field name and index
delete = data.get('delete', None)
delete_index = None
if delete:
n, i = delete.split('.')
if n == name:
delete_index = int(i) + 1
# Zip together all the lists of filter state values. This gives us a
# list of tuples of filter state fields. Ugly but necessary in case we
# have a filter which generates a MultiValueField (aka,
# NumericComparisonFilter). Exclude elements which have been deleted.
filter_state_names = self.filter_state_names[:]
filter_state_list = [data.getlist(state_name % name, []) for state_name in filter_state_names]
filter_states = zip(*filter_state_list)
# Loop over every filter state tuple, converting it to a mini filter-
# -state dict. Clean it, and store the cleaned data in a list
data_set = []
for i in range(len(filter_states)):
# If this index is getting deleted, don't add it
if i == delete_index:
continue
# Get the dict of states for this filter set element
state = filter_states[i]
filter_dict = {}
            for j in range(0, len(filter_state_names)):
                filter_dict.update({filter_state_names[j] % name: state[j]})
# Clean and validate the set instance data. If it validates, store
# it in the state list.
cleaned_data = self.clean_data(name, filter_dict)
if cleaned_data:
data_elem = cleaned_data
data_set.append(data_elem)
# Return the list of states
return data_set
def get_filter_state_from_data(self, name, data):
"""
Another nasty little bit. This one (if not overridden) takes some
data and encodes it, using the filter state names, to be a valid
filter_state which would return the original data if passed to get_data
TODO: Make sure this actually works for stuff other than
NumericComparisonFilter
TODO: Add good comments :P
"""
if len(self.filter_state_names) > 1:
if not (hasattr(data, '__iter__') and len(self.filter_state_names) == len(data)):
raise Exception()
state = {}
for i in range(0, len(data)):
state.update({self.filter_state_names[i] % name: data[i]})
return state
else:
return {self.filter_state_names[0] % name: data}
def apply_filter(self, queryset, data):
filterspec = {self.filter_field: data}
return queryset.filter(**filterspec)
def apply_filter_set(self, queryset, data_set):
# Apply the filter to the queryset based on each entry in the data set
for data in data_set:
queryset = self.apply_filter(queryset, data)
return queryset
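# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the smallest useful
# concrete Filter. The lookup 'name__icontains' is a hypothetical example of a
# filter_field; any Django queryset lookup string works here.
class ExampleSearchFilter(Filter):
    """Case-insensitive substring filter against a model's 'name' field."""
    form_field_class = forms.CharField
    filter_field = 'name__icontains'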
class Report(object):
__metaclass__ = ABCMeta
headers = None
footers = None
title = None
def __init__(self, filter_states={}):
"""
        filter_states will be a QueryDict with keys corresponding to the names
of the filter members on this report object.
"""
if isinstance(filter_states, QueryDict):
self.filter_states = filter_states
else:
self.filter_states = QueryDict('', mutable=True)
self.filter_states.update(filter_states)
self.title = self.title or self.get_title_from_class_name()
def __getattribute__(self, name):
"""
When getting a filter attribute, looks for the corresponding filter
state and returns that instead of the filter object. If none is found,
looks for the default value on the filter object. If that's not found
either, then returns none.
"""
# Perform the normal __getattribute__ call
attr = object.__getattribute__(self, name)
# If it's a filter attribute...
if issubclass(type(attr), Filter):
# If we have a filter state for this filter, convert it to the type
# of data for this filter.
if not attr.filter_set:
return attr.get_data(name, self.filter_states)
else:
return attr.get_data_set(name, self.filter_states)
# This isn't a filter, just return the attribute
return attr
def get_title_from_class_name(self):
"""
Split the class name into words, delimited by capitals.
"""
words = re.split(r'([A-Z])', self.__class__.__name__)[1:]
words = [words[i] + words[i+1] for i in range(0, len(words) - 1, 2)]
return ' '.join(words)
def get_filter(self, name):
"""
Perform the normal __getattribute__ call,
and return it if it's a filter
"""
attr = object.__getattribute__(self, name)
return attr if issubclass(type(attr), Filter) else None
def get_filters(self):
"""
Return a list of all the names and attributes on this report instance
which have a base class of Filter.
"""
filters = []
for name in dir(self):
attr = object.__getattribute__(self, name)
if issubclass(type(attr), Filter):
filters.append((name, attr))
return sorted(filters, key=lambda attr: attr[1].order)
def get_filter_forms(self):
for name, attr in self.get_filters():
# If it is a filter set, loop through the existing list of data
# in the filter states, if there are any. For each of these, make a
# sub-form which includes a "delete" checkbox
if attr.filter_set:
# Get the new-set element form
form = attr.get_form_class(name)()
form.name = name
yield form
# Yield all the existing form elements
data_set = attr.get_data_set(name, self.filter_states)
for i in range(len(data_set)):
data = data_set[i]
state = attr.get_filter_state_from_data(name, data)
# Generate and yield a form containing the filter's field,
# as well as a deleting submit field to mark deletions
form = attr.get_form_class(
name=name,
postfix="FormSetElem"
)(data=state)
form.delete = {
'filter': name,
'index': i}
form.name = name
yield form
            # If it ain't a filter set, just get its form class and render it
# with the filter state data
else:
form = attr.get_form_class(name)(data=self.filter_states)
form.name = name
yield form
def get_title(self):
return self.title
def get_headers(self):
return self.headers
def get_footers(self):
return self.footers
def apply_filter(self, queryset, name):
f = self.get_filter(name)
# If it's not a filterset, just get the regular data and apply it
if not f.filter_set:
data = f.get_data(name, self.filter_states)
if data:
return f.apply_filter(queryset, data)
# Otherwise, get the full data set and apply it
else:
data_set = f.get_data_set(name, self.filter_states)
if len(data_set) > 0:
return f.apply_filter_set(queryset, data_set)
# If we weren't able to apply the filter, return the raw queryset
return queryset
def apply_filters(self, queryset, names=None, excludes=[]):
for name, f in self.get_filters():
# Only apply this filter if it's selected
if name in excludes or (names and name not in names):
continue
# Apply this filter
queryset = self.apply_filter(queryset, name)
# Return the filtered queryset
return queryset
def get_queryset(self):
return []
def get_row(self, item):
"""
This can return a list for simple data that doesn't need special
template rendering, or a dict for more complex data where individual
fields will need to be rendered specially.
"""
return []
def get_rows(self):
rows = []
for item in self.get_queryset():
row = self.get_row(item)
if row:
rows.append(row)
return rows
def get_count(self):
return self.get_queryset().count()
def get_table(self):
return [[cell for cell in row] for row in self.get_rows()]
@staticmethod
def encode_filter_states(data):
"""
Converts a normal POST querydict to the filterstate data,
to be stored in the url
"""
#data = QueryDict(data.urlencode(), mutable=True)
return data
@staticmethod
def decode_filter_states(data):
"""
Opposite of encode_filter_states
"""
return data
class Row(object):
def __init__(self, list, attrs=None):
self.list = list
if attrs:
for name, value in attrs.iteritems():
setattr(self, name, value)
def __iter__(self):
return self.list.__iter__()
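# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): wiring a filter into a
# report. ``Book`` is a hypothetical model, so the example is left commented.
#
# class BookReport(Report):
#     title = 'Books'
#     headers = ['Title', 'Published']
#     name = ExampleSearchFilter(label='Title contains',
#                                filter_field='title__icontains')
#
#     def get_queryset(self):
#         return self.apply_filters(Book.objects.all())
#
#     def get_row(self, book):
#         return [book.title, book.published]
#
# Instantiating ``BookReport(filter_states=request.GET)`` makes ``self.name``
# resolve (via __getattribute__) to the cleaned filter value instead of the
# Filter instance, and apply_filters() turns it into a queryset .filter() call.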
| 35.008547 | 102 | 0.595296 | [
"MIT"
] | flagshipenterprise/django-prickly-reports | reporting/base.py | 12,288 | Python |
"""A Mailman newsletter subscription interface.
To use this plugin, enable the newsletter module and set the newsletter module and name settings
in the admin settings page.
"""
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from satchmo.configuration import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
email = contact.email
current = Subscription.email_is_subscribed(email)
attributesChanged = False
sub = None
if attributes:
sub, created = Subscription.objects.get_or_create(email=email)
if created:
attributesChanged = True
else:
oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
oldAttr.sort()
sub.update_attributes(attributes)
newAttr = [(a.name,a.value) for a in sub.attributes.all()]
newAttr.sort()
if not created:
attributesChanged = oldAttr != newAttr
if current == subscribe:
if subscribe:
if attributesChanged:
result = _("Updated subscription for %(email)s.")
else:
result = _("Already subscribed %(email)s.")
else:
result = _("Already removed %(email)s.")
else:
if not sub:
sub, created = Subscription.objects.get_or_create(email=email)
sub.subscribed = subscribe
sub.save()
if subscribe:
mailman_add(contact)
result = _("Subscribed: %(email)s")
else:
mailman_remove(contact)
result = _("Unsubscribed: %(email)s")
return result % { 'email' : email }
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
"""Add a Satchmo contact to a mailman mailing list.
Parameters:
    - `contact`: A Satchmo Contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `send_welcome_msg`: True or False, defaulting to the list default
- `admin_notify`: True of False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)
if send_welcome_msg is None:
send_welcome_msg = mm.send_welcome_msg
userdesc = UserDesc()
userdesc.fullname = contact.full_name
userdesc.address = contact.email
userdesc.digest = False
if mm.isMember(contact.email):
print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
else:
try:
try:
mm.Lock()
mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
mm.Save()
print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
except Errors.MMAlreadyAMember:
print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
except Errors.MMBadEmailError:
if userdesc.address == '':
print >> sys.stderr, _('Bad/Invalid email address: blank line')
else:
print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
except Errors.MMHostileAddress:
print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
finally:
mm.Unlock()
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
"""Remove a Satchmo contact from a Mailman mailing list
Parameters:
- `contact`: A Satchmo contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `userack`: True or False, whether to notify the user, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
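# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). Assuming the
# NEWSLETTER_NAME setting names an existing Mailman list and ``contact`` is a
# Satchmo Contact, a typical subscription update looks like:
#
#     result = update_contact(contact, True, attributes={'source': 'checkout'})
#     log.info(result)   # e.g. "Subscribed: user@example.com"
#
# Passing False as the second argument unsubscribes the address and calls
# mailman_remove() behind the scenes; 'source' is a hypothetical attribute key.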
| 34.621622 | 119 | 0.62178 | [
"BSD-3-Clause"
] | sankroh/satchmo | satchmo/newsletter/mailman.py | 5,124 | Python |
# Generated by Django 3.1.7 on 2021-02-25 17:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backoffice', '0005_auto_20210225_1712'),
]
operations = [
migrations.AddField(
model_name='buyingentry',
name='partition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='backoffice.partitionformulla'),
),
migrations.AlterField(
model_name='buyingentry',
name='buying',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='entries', to='backoffice.buying'),
),
migrations.AlterField(
model_name='buyingentry',
name='quantity',
field=models.IntegerField(verbose_name='Quantité achetée'),
),
]
| 30.866667 | 132 | 0.631749 | [
"MIT"
] | mono57/verger.stock-mgmt | backoffice/migrations/0006_auto_20210225_1755.py | 928 | Python |
#
# Copyright (c) 2017 Digital Shadows Ltd.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from ds_model import DSModel
class InfrastructureSSL(DSModel):
def __init__(self, id, payload):
self._id = id
self._payload = payload
@property
def id(self):
return self._id
@property
def payload(self):
return self._payload
def __str__(self):
return 'InfrastructureSSL[id={}, payload={}]'.format(self.id, self.payload)
@classmethod
def from_json(cls, json):
cast = DSModel.cast
return cls(cast(json.get('id'), long), json)
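# Illustrative sketch (not part of the original module): instances are built
# from raw API JSON via the classmethod above, e.g.
#     InfrastructureSSL.from_json({'id': 12345, 'grade': 'B'})
# yields id=12345 and payload=the full dict ('grade' is a hypothetical key).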
| 20.806452 | 83 | 0.634109 | [
"Apache-2.0"
] | BenSterenson/phantom-apps | Apps/phdigitalshadows/dsapi/model/infrastructure_ssl.py | 645 | Python |
from django.urls import path
from django.views.i18n import JavaScriptCatalog
from .views import HelloWorldView
app_name = 'pokus1'  # makes it possible to use {% url 'pokus1:...' %}
# however, this may be unnecessary: the namespace can also be set in include(), see the project-level urls.py
urlpatterns = [
    path('jsi18n/pokus1/', JavaScriptCatalog.as_view(), name='javascript-catalog'),  # the /pokus1/ suffix keeps this app's JS catalog URL unique
path('', HelloWorldView.as_view(), name='hello')
]
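# Illustrative usage (assumption, not from the original file): with the
# namespace above, a template can load this app's JS translation catalog via
#   <script src="{% url 'pokus1:javascript-catalog' %}"></script>
# and reverse the hello view with {% url 'pokus1:hello' %}.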
| 34.142857 | 131 | 0.730126 | [
"MIT"
] | zvolsky/example_translation | pokus1/urls.py | 478 | Python |
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(*test_args):
"""Setup and run django-lockdowns test suite."""
os.environ['DJANGO_SETTINGS_MODULE'] = 'lockdown.tests.test_settings'
django.setup()
if not test_args:
test_args = ['lockdown.tests']
test_runner = get_runner(settings)()
failures = test_runner.run_tests(test_args)
sys.exit(bool(failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
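# Illustrative usage (assumption, not part of the original script):
#   python runtests.py                           # runs the default 'lockdown.tests' suite
#   python runtests.py lockdown.tests.SomeTests  # hypothetical label(s) passed through to the runner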
| 20.222222 | 73 | 0.705128 | [
"BSD-3-Clause"
] | carta/django-lockdown | runtests.py | 546 | Python |
import argparse
import collections
import functools
import itertools
import json
import multiprocessing as mp
import os
import pathlib
import re
import subprocess
import warnings
os.environ['NO_AT_BRIDGE'] = '1' # Hide X org false warning.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
np.set_string_function(lambda x: f'<np.array shape={x.shape} dtype={x.dtype}>')
Run = collections.namedtuple('Run', 'task method seed xs ys')
PALETTES = dict(
discrete=(
'#377eb8', '#4daf4a', '#984ea3', '#e41a1c', '#ff7f00', '#a65628',
'#f781bf', '#888888', '#a6cee3', '#b2df8a', '#cab2d6', '#fb9a99',
),
contrast=(
'#0022ff', '#33aa00', '#ff0011', '#ddaa00', '#cc44dd', '#0088aa',
'#001177', '#117700', '#990022', '#885500', '#553366', '#006666',
),
gradient=(
'#fde725', '#a0da39', '#4ac16d', '#1fa187', '#277f8e', '#365c8d',
'#46327e', '#440154',
),
baselines=(
'#222222', '#666666', '#aaaaaa', '#cccccc',
),
)
LEGEND = dict(
fontsize='medium', numpoints=1, labelspacing=0, columnspacing=1.2,
handlelength=1.5, handletextpad=0.5, ncol=4, loc='lower center')
DEFAULT_BASELINES = [
'd4pg', 'dqn_sticky', 'rainbow_sticky', 'human$', 'impala']
BINS = collections.defaultdict(int)
BINS.update(dmc=1e5, atari=1e6, particle=1e5)
def find_keys(args):
filenames = []
for indir in args.indir:
task = next(indir.iterdir()) # First only.
for method in task.iterdir():
      seed = next(method.iterdir())  # First only.
filenames += list(seed.glob('**/*.jsonl'))
keys = set()
for filename in filenames:
keys |= set(load_jsonl(filename).columns)
print(f'Keys ({len(keys)}):', ', '.join(keys), flush=True)
def load_runs(args):
total, toload = [], []
for indir in args.indir:
filenames = list(indir.glob('**/*.jsonl'))
total += filenames
for filename in filenames:
task, method, seed = filename.relative_to(indir).parts[:-1]
if not any(p.search(task) for p in args.tasks):
continue
if not any(p.search(method) for p in args.methods):
continue
toload.append((filename, indir))
print(f'Loading {len(toload)} of {len(total)} runs...')
jobs = [functools.partial(load_run, f, i, args) for f, i in toload]
# Disable async data loading:
# runs = [j() for j in jobs]
with mp.Pool(10) as pool:
promises = [pool.apply_async(j) for j in jobs]
runs = [p.get() for p in promises]
runs = [r for r in runs if r is not None]
return runs
def load_run(filename, indir, args):
task, method, seed = filename.relative_to(indir).parts[:-1]
prefix = f'indir{args.indir.index(indir)+1}_'
if task == 'atari_jamesbond':
task = 'atari_james_bond'
seed = prefix + seed
if args.prefix:
method = prefix + method
df = load_jsonl(filename)
if df is None:
print('Skipping empty run')
return
try:
df = df[[args.xaxis, args.yaxis]].dropna()
if args.maxval:
df = df.replace([+np.inf], +args.maxval)
df = df.replace([-np.inf], -args.maxval)
df[args.yaxis] = df[args.yaxis].clip(-args.maxval, +args.maxval)
except KeyError:
return
xs = df[args.xaxis].to_numpy()
ys = df[args.yaxis].to_numpy()
bins = BINS[task.split('_')[0]] if args.bins == -1 else args.bins
if bins:
borders = np.arange(0, xs.max() + 1e-8, bins)
xs, ys = bin_scores(xs, ys, borders)
if not len(xs):
print('Skipping empty run', task, method, seed)
return
return Run(task, method, seed, xs, ys)
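# Illustrative note (assumption inferred from the path parsing above): runs are
# expected on disk as <indir>/<task>/<method>/<seed>/**/*.jsonl, for example
#   logdir/atari_pong/dreamerv2/seed1/metrics.jsonl
# where every JSONL line is one logged step containing at least the --xaxis
# and --yaxis keys.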
def load_baselines(patterns, prefix=False):
runs = []
directory = pathlib.Path(__file__).parent / 'scores'
for filename in directory.glob('**/*_baselines.json'):
for task, methods in json.loads(filename.read_text()).items():
for method, score in methods.items():
if prefix:
method = f'baseline_{method}'
if not any(p.search(method) for p in patterns):
continue
runs.append(Run(task, method, None, None, score))
return runs
def stats(runs, baselines):
tasks = sorted(set(r.task for r in runs))
methods = sorted(set(r.method for r in runs))
seeds = sorted(set(r.seed for r in runs))
baseline = sorted(set(r.method for r in baselines))
print('Loaded', len(runs), 'runs.')
print(f'Tasks ({len(tasks)}):', ', '.join(tasks))
print(f'Methods ({len(methods)}):', ', '.join(methods))
print(f'Seeds ({len(seeds)}):', ', '.join(seeds))
print(f'Baselines ({len(baseline)}):', ', '.join(baseline))
def order_methods(runs, baselines, args):
methods = []
for pattern in args.methods:
for method in sorted(set(r.method for r in runs)):
if pattern.search(method):
if method not in methods:
methods.append(method)
if method not in args.colors:
index = len(args.colors) % len(args.palette)
args.colors[method] = args.palette[index]
non_baseline_colors = len(args.colors)
for pattern in args.baselines:
for method in sorted(set(r.method for r in baselines)):
if pattern.search(method):
if method not in methods:
methods.append(method)
if method not in args.colors:
index = len(args.colors) - non_baseline_colors
index = index % len(PALETTES['baselines'])
args.colors[method] = PALETTES['baselines'][index]
return methods
def figure(runs, methods, args):
tasks = sorted(set(r.task for r in runs if r.xs is not None))
rows = int(np.ceil((len(tasks) + len(args.add)) / args.cols))
figsize = args.size[0] * args.cols, args.size[1] * rows
fig, axes = plt.subplots(rows, args.cols, figsize=figsize)
for task, ax in zip(tasks, axes.flatten()):
relevant = [r for r in runs if r.task == task]
plot(task, ax, relevant, methods, args)
for name, ax in zip(args.add, axes.flatten()[len(tasks):]):
ax.set_facecolor((0.9, 0.9, 0.9))
if name == 'median':
plot_combined(
'combined_median', ax, runs, methods, args,
lo='random', hi='human$',
agg=lambda x: np.nanmedian(x, -1))
elif name == 'mean':
plot_combined(
'combined_mean', ax, runs, methods, args,
lo='random', hi='human$',
agg=lambda x: np.nanmean(x, -1))
elif name == 'gamer_median':
plot_combined(
'combined_gamer_median', ax, runs, methods, args,
lo='random', hi='human$',
agg=lambda x: np.nanmedian(x, -1))
elif name == 'gamer_mean':
plot_combined(
'combined_gamer_mean', ax, runs, methods, args,
lo='random', hi='human$',
agg=lambda x: np.nanmean(x, -1))
elif name == 'record_mean':
plot_combined(
'combined_record_mean', ax, runs, methods, args,
lo='random', hi='record',
agg=lambda x: np.nanmean(x, -1))
elif name == 'clipped_record_mean':
plot_combined(
'combined_clipped_record_mean', ax, runs, methods, args,
lo='random', hi='record', clip=True,
agg=lambda x: np.nanmean(x, -1))
elif name == 'num_seeds':
plot_combined(
'combined_num_seeds', ax, runs, methods, args,
agg=lambda x: np.isfinite(x).sum(-1))
elif name == 'human_above':
plot_combined(
'combined_above_human$', ax, runs, methods, args,
agg=lambda y: (y >= 1.0).astype(float).sum(-1))
elif name == 'human_below':
plot_combined(
'combined_below_human$', ax, runs, methods, args,
agg=lambda y: (y <= 1.0).astype(float).sum(-1))
else:
raise NotImplementedError(name)
if args.xlim:
for ax in axes[:-1].flatten():
ax.xaxis.get_offset_text().set_visible(False)
if args.xlabel:
for ax in axes[-1]:
ax.set_xlabel(args.xlabel)
if args.ylabel:
for ax in axes[:, 0]:
ax.set_ylabel(args.ylabel)
for ax in axes.flatten()[len(tasks) + len(args.add):]:
ax.axis('off')
legend(fig, args.labels, **LEGEND)
return fig
def plot(task, ax, runs, methods, args):
assert runs
try:
title = task.split('_', 1)[1].replace('_', ' ').title()
except IndexError:
title = task.title()
ax.set_title(title)
xlim = [+np.inf, -np.inf]
for index, method in enumerate(methods):
relevant = [r for r in runs if r.method == method]
if not relevant:
continue
if any(r.xs is None for r in relevant):
baseline(index, method, ax, relevant, args)
else:
if args.aggregate == 'none':
xs, ys = curve_lines(index, task, method, ax, relevant, args)
else:
xs, ys = curve_area(index, task, method, ax, relevant, args)
if len(xs) == len(ys) == 0:
print(f'Skipping empty: {task} {method}')
continue
xlim = [min(xlim[0], xs.min()), max(xlim[1], xs.max())]
ax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))
steps = [1, 2, 2.5, 5, 10]
ax.xaxis.set_major_locator(ticker.MaxNLocator(args.xticks, steps=steps))
ax.yaxis.set_major_locator(ticker.MaxNLocator(args.yticks, steps=steps))
if np.isfinite(xlim).all():
ax.set_xlim(args.xlim or xlim)
if args.xlim:
ticks = sorted({*ax.get_xticks(), *args.xlim})
ticks = [x for x in ticks if args.xlim[0] <= x <= args.xlim[1]]
ax.set_xticks(ticks)
if args.ylim:
ax.set_ylim(args.ylim)
if args.ylimticks:
ticks = sorted({*ax.get_yticks(), *args.ylim})
ticks = [x for x in ticks if args.ylim[0] <= x <= args.ylim[1]]
ax.set_yticks(ticks)
def plot_combined(
name, ax, runs, methods, args, agg, lo=None, hi=None, clip=False):
tasks = sorted(set(run.task for run in runs if run.xs is not None))
seeds = list(set(run.seed for run in runs))
runs = [r for r in runs if r.task in tasks] # Discard unused baselines.
# Bin all runs onto the same X steps.
borders = sorted(
[r.xs for r in runs if r.xs is not None],
key=lambda x: np.nanmax(x))[-1]
for index, run in enumerate(runs):
if run.xs is None:
continue
xs, ys = bin_scores(run.xs, run.ys, borders)
runs[index] = run._replace(xs=xs, ys=ys)
# Per-task normalization by low and high baseline.
if lo or hi:
mins = collections.defaultdict(list)
maxs = collections.defaultdict(list)
[mins[r.task].append(r.ys) for r in load_baselines([re.compile(lo)])]
[maxs[r.task].append(r.ys) for r in load_baselines([re.compile(hi)])]
mins = {task: min(ys) for task, ys in mins.items() if task in tasks}
maxs = {task: max(ys) for task, ys in maxs.items() if task in tasks}
missing_baselines = []
for task in tasks:
if task not in mins or task not in maxs:
missing_baselines.append(task)
if set(missing_baselines) == set(tasks):
print(f'No baselines found to normalize any tasks in {name} plot.')
else:
for task in missing_baselines:
print(f'No baselines found to normalize {task} in {name} plot.')
for index, run in enumerate(runs):
if run.task not in mins or run.task not in maxs:
continue
ys = (run.ys - mins[run.task]) / (maxs[run.task] - mins[run.task])
if clip:
ys = np.minimum(ys, 1.0)
runs[index] = run._replace(ys=ys)
# Aggregate across tasks but not methods or seeds.
combined = []
for method, seed in itertools.product(methods, seeds):
relevant = [r for r in runs if r.method == method and r.seed == seed]
if not relevant:
continue
if relevant[0].xs is None:
xs, ys = None, np.array([r.ys for r in relevant])
else:
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in relevant]))
with warnings.catch_warnings(): # Ignore empty slice warnings.
warnings.simplefilter('ignore', category=RuntimeWarning)
combined.append(Run('combined', method, seed, xs, agg(ys)))
plot(name, ax, combined, methods, args)
def curve_lines(index, task, method, ax, runs, args):
zorder = 10000 - 10 * index - 1
for run in runs:
color = args.colors[method]
ax.plot(run.xs, run.ys, label=method, color=color, zorder=zorder)
return runs[0].xs, runs[0].ys
def curve_area(index, task, method, ax, runs, args):
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs]))
with warnings.catch_warnings(): # NaN buckets remain NaN.
warnings.simplefilter('ignore', category=RuntimeWarning)
if args.aggregate == 'std1':
mean, std = np.nanmean(ys, -1), np.nanstd(ys, -1)
lo, mi, hi = mean - std, mean, mean + std
elif args.aggregate == 'per0':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (0, 50, 100)]
elif args.aggregate == 'per5':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (5, 50, 95)]
elif args.aggregate == 'per25':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (25, 50, 75)]
else:
raise NotImplementedError(args.aggregate)
color = args.colors[method]
kw = dict(color=color, zorder=1000 - 10 * index, alpha=0.1, linewidths=0)
ax.fill_between(xs, lo, hi, **kw)
ax.plot(xs, mi, label=method, color=color, zorder=10000 - 10 * index - 1)
return xs, mi
def baseline(index, method, ax, runs, args):
assert all(run.xs is None for run in runs)
ys = np.array([run.ys for run in runs])
mean, std = ys.mean(), ys.std()
color = args.colors[method]
kw = dict(color=color, zorder=500 - 20 * index - 1, alpha=0.1, linewidths=0)
ax.fill_between([-np.inf, np.inf], [mean - std] * 2, [mean + std] * 2, **kw)
kw = dict(ls='--', color=color, zorder=5000 - 10 * index - 1)
ax.axhline(mean, label=method, **kw)
def legend(fig, mapping=None, **kwargs):
entries = {}
for ax in fig.axes:
for handle, label in zip(*ax.get_legend_handles_labels()):
if mapping and label in mapping:
label = mapping[label]
entries[label] = handle
leg = fig.legend(entries.values(), entries.keys(), **kwargs)
leg.get_frame().set_edgecolor('white')
extent = leg.get_window_extent(fig.canvas.get_renderer())
extent = extent.transformed(fig.transFigure.inverted())
yloc, xloc = kwargs['loc'].split()
y0 = dict(lower=extent.y1, center=0, upper=0)[yloc]
y1 = dict(lower=1, center=1, upper=extent.y0)[yloc]
x0 = dict(left=extent.x1, center=0, right=0)[xloc]
x1 = dict(left=1, center=1, right=extent.x0)[xloc]
fig.tight_layout(rect=[x0, y0, x1, y1], h_pad=0.5, w_pad=0.5)
def save(fig, args):
args.outdir.mkdir(parents=True, exist_ok=True)
filename = args.outdir / 'curves.png'
fig.savefig(filename, dpi=args.dpi)
print('Saved to', filename)
filename = args.outdir / 'curves.pdf'
fig.savefig(filename)
try:
subprocess.call(['pdfcrop', str(filename), str(filename)])
except FileNotFoundError:
print('Install texlive-extra-utils to crop PDF outputs.')
def bin_scores(xs, ys, borders, reducer=np.nanmean):
order = np.argsort(xs)
xs, ys = xs[order], ys[order]
binned = []
with warnings.catch_warnings(): # Empty buckets become NaN.
warnings.simplefilter('ignore', category=RuntimeWarning)
for start, stop in zip(borders[:-1], borders[1:]):
left = (xs <= start).sum()
right = (xs <= stop).sum()
binned.append(reducer(ys[left:right]))
return borders[1:], np.array(binned)
def stack_scores(multiple_xs, multiple_ys):
longest_xs = sorted(multiple_xs, key=lambda x: len(x))[-1]
multiple_padded_ys = []
for xs, ys in zip(multiple_xs, multiple_ys):
assert (longest_xs[:len(xs)] == xs).all(), (list(xs), list(longest_xs))
    padding = [np.nan] * (len(longest_xs) - len(xs))
padded_ys = np.concatenate([ys, padding])
multiple_padded_ys.append(padded_ys)
stacked_ys = np.stack(multiple_padded_ys, -1)
return longest_xs, stacked_ys
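# Illustrative sketch (not part of the original script): how bin_scores and
# stack_scores cooperate. Runs logged at different steps are first binned
# onto shared borders, then stacked column-wise with NaN padding so the
# nan-aware reducers above ignore missing tails. The toy arrays are made up.
def _demo_bin_and_stack():
  borders = np.array([0, 2, 4])
  xs, ys = bin_scores(
      np.array([1, 2, 3, 4]), np.array([10.0, 20.0, 30.0, 40.0]), borders)
  # xs == [2, 4], ys == [15.0, 35.0]
  short_xs, short_ys = np.array([2]), np.array([1.5])  # run stopped early
  return stack_scores([xs, short_xs], [ys, short_ys])  # padded with NaN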
def load_jsonl(filename):
try:
with filename.open() as f:
lines = list(f.readlines())
records = []
for index, line in enumerate(lines):
try:
records.append(json.loads(line))
except Exception:
if index == len(lines) - 1:
continue # Silently skip last line if it is incomplete.
raise ValueError(
            f'Invalid JSON line ({index + 1}/{len(lines)}) in '
            f'{filename}: {line}')
return pd.DataFrame(records)
except ValueError as e:
print('Invalid', filename, e)
return None
def save_runs(runs, filename):
filename.parent.mkdir(parents=True, exist_ok=True)
records = []
for run in runs:
if run.xs is None:
continue
records.append(dict(
task=run.task, method=run.method, seed=run.seed,
xs=run.xs.tolist(), ys=run.ys.tolist()))
runs = json.dumps(records)
filename.write_text(runs)
print('Saved', filename)
def main(args):
find_keys(args)
runs = load_runs(args)
save_runs(runs, args.outdir / 'runs.jsonl')
baselines = load_baselines(args.baselines, args.prefix)
stats(runs, baselines)
methods = order_methods(runs, baselines, args)
if not runs:
    print('Nothing to plot.')
return
print('Plotting...')
fig = figure(runs + baselines, methods, args)
save(fig, args)
def parse_args():
boolean = lambda x: bool(['False', 'True'].index(x))
parser = argparse.ArgumentParser()
parser.add_argument('--indir', nargs='+', type=pathlib.Path, required=True)
parser.add_argument('--outdir', type=pathlib.Path, required=True)
parser.add_argument('--subdir', type=boolean, default=True)
parser.add_argument('--xaxis', type=str, required=True)
parser.add_argument('--yaxis', type=str, required=True)
parser.add_argument('--tasks', nargs='+', default=[r'.*'])
parser.add_argument('--methods', nargs='+', default=[r'.*'])
parser.add_argument('--baselines', nargs='+', default=DEFAULT_BASELINES)
parser.add_argument('--prefix', type=boolean, default=False)
parser.add_argument('--bins', type=float, default=-1)
parser.add_argument('--aggregate', type=str, default='std1')
parser.add_argument('--size', nargs=2, type=float, default=[2.5, 2.3])
parser.add_argument('--dpi', type=int, default=80)
parser.add_argument('--cols', type=int, default=6)
parser.add_argument('--xlim', nargs=2, type=float, default=None)
parser.add_argument('--ylim', nargs=2, type=float, default=None)
parser.add_argument('--ylimticks', type=boolean, default=True)
parser.add_argument('--xlabel', type=str, default=None)
parser.add_argument('--ylabel', type=str, default=None)
parser.add_argument('--xticks', type=int, default=6)
parser.add_argument('--yticks', type=int, default=5)
parser.add_argument('--labels', nargs='+', default=None)
parser.add_argument('--palette', nargs='+', default=['contrast'])
parser.add_argument('--colors', nargs='+', default={})
parser.add_argument('--maxval', type=float, default=0)
parser.add_argument('--add', nargs='+', type=str, default=[
'gamer_median', 'gamer_mean', 'record_mean',
'clipped_record_mean', 'num_seeds'])
args = parser.parse_args()
if args.subdir:
args.outdir /= args.indir[0].stem
args.indir = [d.expanduser() for d in args.indir]
args.outdir = args.outdir.expanduser()
if args.labels:
assert len(args.labels) % 2 == 0
args.labels = {k: v for k, v in zip(args.labels[:-1], args.labels[1:])}
if args.colors:
assert len(args.colors) % 2 == 0
args.colors = {k: v for k, v in zip(args.colors[:-1], args.colors[1:])}
args.tasks = [re.compile(p) for p in args.tasks]
args.methods = [re.compile(p) for p in args.methods]
args.baselines = [re.compile(p) for p in args.baselines]
if 'return' not in args.yaxis:
args.baselines = []
if args.prefix is None:
args.prefix = len(args.indir) > 1
if len(args.palette) == 1 and args.palette[0] in PALETTES:
args.palette = 10 * PALETTES[args.palette[0]]
if len(args.add) == 1 and args.add[0] == 'none':
args.add = []
return args
if __name__ == '__main__':
main(parse_args())
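# Example invocation (illustrative; the directory paths and the --xaxis and
# --yaxis metric names are placeholders, not values prescribed by this script):
#   python plotting.py --indir ~/logdir/experiment --outdir ~/plots \
#     --xaxis step --yaxis eval_return --bins 1e5 --cols 4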
| 36.582255 | 79 | 0.637411 | [
"MIT"
] | HanMeh/dreamerv2 | plotting.py | 19,791 | Python |
from __future__ import division
import requests
import json
import sys
import os
from elasticsearch import Elasticsearch
from elasticsearch import exceptions
try:
# idrac_ip = os.environ['IDRAC_IP']
# idrac_username = os.environ['IDRAC_USERNAME']
# idrac_password = os.environ['IDRAC_PASSWORD']
# elastic_ip = os.environ['ELASTIC_IP']
# elastic_username = os.environ['ELASTIC_USERNAME']
# elastic_password = os.environ['ELASTIC_PASSWORD']
idrac_ip="100.98.26.49"
idrac_username="root"
idrac_password="calvin"
elastic_ip="100.98.26.172"
elastic_username="elastic"
elastic_password="changeme"
es = Elasticsearch([elastic_ip],
http_auth=(elastic_username, elastic_password),
scheme="http",
port=9200,
)
except Exception as e:
print("- FAIL: You must pass in script name along with iDRAC IP / iDRAC username / iDRAC password")
sys.exit(0)
def retrieve_logs():
index_name="lc"+idrac_ip
res=es.search(index=index_name, body={
"query":{
"range": {
"timestamp": {
"gte" : "now-5m",
"lt" : "now"
}
}
}
}
)
# print(data)
codes = {}
code_types={}
for i in res['hits']['hits']:
#print(i)
#print("\n")
for key,value in i['_source'].items():
if key=='MessageID':
code=value
code_type=value[0:3]
#print(code_type)
if code in codes:
codes[code]=codes[code]+1
else:
codes.update({code: 1})
if code_type in code_types:
code_types[code_type]=code_types[code_type]+1
else:
code_types.update({code_type: 1})
total_errors=sum(codes.values())
# print total_errors
error_percentage={}
print "\nFor Server: ",idrac_ip
# print "__________________________ \n\n\n"
print("\n\n\n")
print "Error Codes Occurrence Percentage "
print "____________________________________________ \n"
for key,value in codes.items():
error_percentage[key]= (value/total_errors)*100
print key," ",value," ",error_percentage[key],"%"
print "\n"
print "Error Types Occurrence "
print "__________________________ \n"
for key,value in code_types.items():
print key," ",value
# print(codes)
# print(code_types)
# print (total_errors)
# print error_percentage
retrieve_logs()
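# Illustrative helper (not part of the original script): the same tally the
# loops above build from the Elasticsearch hits, expressed with
# collections.Counter. The message IDs passed in are whatever retrieve_logs()
# finds; none are assumed here.
def summarize_message_ids(message_ids):
    from collections import Counter
    codes = Counter(message_ids)                       # per-code occurrence counts
    code_types = Counter(m[0:3] for m in message_ids)  # grouped by 3-char prefix
    total = sum(codes.values())
    percentages = {code: 100.0 * count / total for code, count in codes.items()}
    return codes, code_types, percentages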
| 28.252632 | 103 | 0.564083 | [
"MIT"
] | collabnix/openusm | logging/logextractor/search_script.py | 2,684 | Python |
tc = int(input())
while tc:
tc -= 1
x = int(input())
if 1 <= x and x < 100:
print("Easy")
elif 100 <= x and x < 200:
print("Medium")
else:
print("Hard") | 19.6 | 30 | 0.44898 | [
"MIT"
] | foooop/competitive | CodeChef/problems/PROBCAT/main.py | 196 | Python |
"""
Module Reader Writer
This module provides the ReaderWriter class as a concrete implementation of the AbstractReaderWriter. It handles
the implementation details of interfacing with the hardware.
"""
from controlpyweb.abstract_reader_writer import AbstractReaderWriter
import requests
import json
from typing import Union, Optional, List
import time
import threading
from controlpyweb.errors import ControlPyWebAddressNotFoundError, WebIOConnectionError
lock = threading.Lock()
class ReaderWriter(AbstractReaderWriter):
def __init__(self, url: str, demand_address_exists: bool = True, timeout: float = 10.0,
keep_alive: bool = True, **kwargs):
"""
:param url: The address of the IO Base module from/to which IO is written
"""
url = 'http://{}'.format(url) if 'http' not in url else url
url = '{}/customState.json'.format(url)
self._url = url # type: str
self._io = dict()
self._previous_read_io = dict()
self._changes = dict()
self._first_read = False
self._last_hardware_read_time = None # type: time.time
self._req = requests if not keep_alive else requests.Session()
self.update_reads_on_write = bool(kwargs.get('update_reads_on_write', False))
self.demand_address_exists = demand_address_exists
self.timeout = timeout
@property
def last_hardware_read_time(self):
return self._last_hardware_read_time
def _check_for_address(self, addr: str):
if not self.demand_address_exists:
return
if not self._first_read:
return
if self._io is None:
return
if addr not in self._io:
raise ControlPyWebAddressNotFoundError(addr)
def _get(self, timeout: float = None) -> dict:
""" Does an http get and returns the results as key/value pairs"""
timeout = self.timeout if timeout is None else timeout
self._first_read = True
r = self._req.get(self._url, timeout=timeout)
r = None if r is None else r.json()
return r
@staticmethod
def _value_to_str(value):
if isinstance(value, bool):
value = '1' if value else '0'
return str(value)
@property
def changes(self):
"""Returns a dictionary of all changes made since the last read or write"""
return self._changes
def dumps(self, changes_only: bool = False):
"""Returns the current IO key/values as json string"""
with lock:
if changes_only:
if len(self._changes) == 0:
return ''
return json.dumps(self._changes)
return json.dumps(self._io)
def flush_changes(self):
""" Erases the collection of changes stored in memory"""
with lock:
self._changes = dict()
def loads(self, json_str: str):
"""Replaces the current IO key/values with that from the json string"""
with lock:
self._first_read = True
self._io = json.loads(json_str)
def read(self, addr: str) -> Optional[Union[bool, int, float, str]]:
"""
Returns the value of a single IO from the memory store
"""
with lock:
if not self._first_read:
return None
self._check_for_address(addr)
val = self._io.get(addr)
return val
def read_immediate(self, addr: str, timeout: float = None) -> object:
"""
Makes a hardware call to the base module to retrieve the value of the IO. This is inefficient and should
be used sparingly.
"""
try:
self._check_for_address(addr)
timeout = self.timeout if timeout is None else timeout
vals = self._get(timeout=timeout)
if vals is None:
return None
return vals.get(addr)
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def to_hardware(self, timeout: float = None):
""" Same as send_changes_to_hardware"""
return self.send_changes_to_hardware(timeout)
def send_changes_to_hardware(self, timeout: float = None):
""" Takes the collection of changes made using the write command and
sends them all to the hardware collectively. """
try:
with lock:
if self._changes is None or len(self._changes) == 0:
return
timeout = self.timeout if timeout is None else timeout
self._req.get(self._url, params=self._changes, timeout=timeout)
self.flush_changes()
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def from_hardware(self, timeout: float = None):
""" Same as update_from_hardware"""
self.update_from_hardware(timeout)
def update_from_hardware(self, timeout: float = None):
"""Makes a hardware call to the base module to retrieve the value of all IOs, storing their
results in memory."""
try:
timeout = self.timeout if timeout is None else timeout
with lock:
vals = self._get(timeout)
self._last_hardware_read_time = time.time()
if vals is not None:
self._io = vals
self.flush_changes()
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def write(self, addr: str, value: object) -> None:
"""
Stores the write value in memory to be written as part of a group write when changes are sent to
hardware."""
with lock:
to_str = self._value_to_str(value)
if self.update_reads_on_write:
self._io[addr] = value
self._changes[addr] = to_str
def write_immediate(self, addr: Union[str, List[str]],
value: Union[object, List[object]], timeout: float = None):
"""
Instead of waiting for a group write, writes the given value immediately. Note, this is not very efficient
and should be used sparingly. """
if isinstance(addr, list):
if isinstance(value, list):
items = {addr: self._value_to_str(val) for addr, val in zip(addr, value)}
else:
value = self._value_to_str(value)
items = {addr: value for addr in addr}
else:
items = {addr: self._value_to_str(value)}
try:
timeout = self.timeout if timeout is None else timeout
with lock:
self._req.get(self._url, params=items, timeout=timeout)
for addr, value in items.items():
self._io[addr] = value
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
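# Minimal usage sketch (illustrative only): the module IP address and the IO
# names 'lamp1'/'relay1' below are made-up placeholders, not addresses defined
# by this library.
def _example_usage():
    rw = ReaderWriter('192.168.1.10', demand_address_exists=False)
    rw.update_from_hardware()            # one GET caches every IO value
    lamp_is_on = rw.read('lamp1')        # served from the in-memory cache
    rw.write('relay1', True)             # queued locally ...
    rw.send_changes_to_hardware()        # ... and flushed in a single request
    return lamp_is_on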
| 38 | 114 | 0.613522 | [
"MIT"
] | washad/ControlPyWeb | controlpyweb/reader_writer.py | 7,144 | Python |
'''
Bootstrapped from https://github.com/NewKnowledge/imagenet and refined for D3M purposes
Original implementation from Craig Corcoran
'''
import os
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import inception_v3, mobilenet_v2, xception
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.utils import to_categorical, Sequence
import logging
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
class ImagenetModel:
''' A class for featurizing images using pre-trained neural nets on ImageNet
and finetuning those nets for downstream classification
'''
def __init__(self,
model='inception_v3',
weights = 'imagenet',
include_top = False,
pooling=None,
n_channels=None,
clf_head_dense_dim = 1024,
):
''' Creates ImageNet base model for featurization or classification and corresponding image
preprocessing function
:param model: options are xception, inception_v3, and mobilenet_v2
:param weights: 'imagenet' or filepath
:param include_top: whether to include original ImageNet classification head with 1000 classes
:param pooling: 'avg', 'max', or None
:param n_channels: number of channels to keep if performing featurization
:param clf_head_dense_dim: dimension of dense layer before softmax classification (only applies
if `include_top` is false)
'''
self.include_top = include_top # determines if used for classification or featurization
self.n_channels = n_channels
self.pooling = pooling
self.clf_head_dense_dim = clf_head_dense_dim
if model == 'xception':
self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = xception.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = xception.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 10**2)
elif model == 'inception_v3':
self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = inception_v3.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = inception_v3.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 8**2)
elif model == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling)
            self.preprocess = mobilenet_v2.preprocess_input
            self.target_size = (224, 224)
            if include_top:
                self.decode = mobilenet_v2.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 1280) * (1 if pooling else 7**2)
else:
raise Exception('model option not implemented')
def _load_finetune_model(
self,
nclasses = 2,
weights_path = None,
):
''' Constructs finetuning model architecture and optionally loads weights
:param nclasses: number of classes on which to softmax over
:param weights_path: optional filepath from which to try to load weights
'''
out = self.model.output
if self.pooling is None:
out = GlobalAveragePooling2D()(out)# if self.pooling == 'avg' else GlobalMaxPooling2D()(out)
dense = Dense(self.clf_head_dense_dim, activation='relu')(out)
preds = Dense(nclasses, activation='softmax')(dense)
finetune_model = Model(inputs = self.model.input, outputs = preds)
# try to load weights
if weights_path is not None:
if os.path.isfile(weights_path):
finetune_model.load_weights(weights_path)
return finetune_model
def get_features(self, images_array):
''' takes a batch of images as a 4-d array and returns the (flattened) imagenet features for those images as a 2-d array '''
if self.include_top:
raise Exception('getting features from a classification model with include_top=True is currently not supported')
if images_array.ndim != 4:
raise Exception('invalid input shape for images_array, expects a 4d array')
# preprocess and compute image features
logger.debug(f'preprocessing {images_array.shape[0]} images')
images_array = self.preprocess(images_array)
logger.debug(f'computing image features')
image_features = self.model.predict(images_array)
# if n_channels is specified, only keep that number of channels
if self.n_channels:
logger.debug(f'truncating to first {self.n_channels} channels')
image_features = image_features.T[: self.n_channels].T
# reshape output array by flattening each image into a vector of features
shape = image_features.shape
return image_features.reshape(shape[0], np.prod(shape[1:]))
def predict(self, images_array):
''' alias for get_features to more closely match scikit-learn interface '''
return self.get_features(images_array)
def finetune(self,
train_dataset,
val_dataset = None,
nclasses = 2,
top_layer_epochs = 1,
unfreeze_proportions = [0.5],
all_layer_epochs = 5,
class_weight = None,
optimizer_top = 'rmsprop',
optimizer_full = 'sgd',
callbacks = None,
num_workers = 8,
load_weights_path = None,
save_weights_path = None,
):
''' Finetunes the Imagenet model iteratively on a smaller set of images with (potentially) a smaller set of classes.
First finetunes last layer then freezes bottom N layers and retrains the rest
:param train_dataset: (X, y) pair of tf.constant tensors for training
:param val_dataset: (X, y) pair of tf.constant tensors for validation, optional
:param nclasses: number of classes
:param top_layer_epochs: how many epochs for which to finetune classification head (happens first)
:param unfreeze_proportions: list of proportions representing how much of the base ImageNet model one wants to
unfreeze (later layers unfrozen) for another round of finetuning
:param all_layer_epochs: how many epochs for which to finetune entire model (happens second)
:param class_weight: class weights (used for both training steps)
:param optimizer_top: optimizer to use for training of classification head
:param optimizer_full: optimizer to use for training full classification model
* suggest to use lower learning rate / more conservative optimizer for this step to
prevent catastrophic forgetting
:param callbacks: optional list of callbacks to use for each round of finetuning
:param num_workers: number of workers to use for multiprocess data loading
:param load_weights_path: optional filepath from which to try to load weights
:param save_weights_path: optional filepath to which to store weights
'''
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path=load_weights_path
)
fitting_histories = []
# freeze all convolutional InceptionV3 layers, retrain top layer
for layer in self.model.layers:
layer.trainable = False
finetune_model.compile(
optimizer=optimizer_top,
loss='categorical_crossentropy')
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = top_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
# iteratively unfreeze specified proportion of later ImageNet base layers and finetune
finetune_model.compile(
# SGD(lr=0.0001, momentum=0.9)
optimizer=optimizer_full,
loss='categorical_crossentropy')
for p in unfreeze_proportions:
freeze_count = int(len(self.model.layers) * p)
for layer in finetune_model.layers[:freeze_count]:
layer.trainable = False
for layer in finetune_model.layers[freeze_count:]:
layer.trainable = True
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = all_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
# save weights
if save_weights_path is not None:
finetune_model.save_weights(save_weights_path)
return fitting_histories
def finetune_classify(self,
test_dataset,
nclasses = 2,
num_workers = 8,
load_weights_path = None,
):
''' Uses the finetuned model to predict on a test dataset.
:param test_dataset: X, tf.constant tensor for inference
:param nclasses: number of classes
:param num_workers: number of workers to use for multiprocess data loading
:return: array of softmaxed prediction probabilities
:param load_weights_path: optional filepath from which to try to load weights
'''
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path = load_weights_path
)
return finetune_model.predict_generator(test_dataset,
use_multiprocessing = True,
workers = num_workers
)
class ImageNetGen(Sequence):
""" Tf.Keras Sequence for ImageNet input data """
def __init__(self, X, y = None, batch_size = 32):
self.X = X
self.y = y
self.batch_size = batch_size
def __len__(self):
return math.ceil(self.X.shape[0] / self.batch_size)
def __getitem__(self, idx):
batch_x = self.X[idx * self.batch_size:(idx + 1) * self.batch_size]
if self.y is None:
return tf.constant(batch_x)
else:
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return tf.constant(batch_x), tf.constant(batch_y)
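# Illustrative sketch (not part of the original primitive) of how ImagenetModel
# and ImageNetGen are meant to be wired together. The image count, batch size,
# and two-class setup below are made-up placeholder values.
def _example_featurize_and_finetune():
    model = ImagenetModel(model='inception_v3', pooling='avg')
    images = np.random.rand(8, 299, 299, 3) * 255.0           # fake image batch
    features = model.get_features(images.copy())              # shape (8, 2048)
    labels = to_categorical(np.random.randint(0, 2, 8), 2)    # fake binary labels
    train = ImageNetGen(model.preprocess(images), y=labels, batch_size=4)
    histories = model.finetune(train, nclasses=2, top_layer_epochs=1,
                               unfreeze_proportions=[0.5], all_layer_epochs=1)
    return features, histories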
| 43.208333 | 132 | 0.62111 | [
"MIT"
] | Yonder-OSS/D3M-Primitives | primitives/image_classification/utils/imagenet.py | 11,407 | Python |
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '../../../'))
import script.rio as io
import script.initial_condition.noh1D as noh1D
# Domain properties
lx = 1.0
ly = 1.0
Nx = 512
Ny = 1
# Scheme execution options
T = 0.6
CFL = 0.5
gamma = 5./3.
BClayer = 1
quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']
def buildme(quantityDict, coords_to_uid, coords_to_bc):
noh1D.build(quantityDict, coords_to_uid, coords_to_bc, Nx, Ny, lx, ly, BClayer)
| 18.8 | 83 | 0.680851 | [
"MIT"
] | will-iam/Variant | casepy/eulerRuO1/nNoh512x1/chars.py | 470 | Python |
nome = input('What is your name? ')
print('Hello', nome + '! Nice to meet you!')
| 28 | 47 | 0.619048 | [
"Unlicense"
] | ZaikoXander/Python | Curso/Mundo 1/02.py | 86 | Python |
element_list = ["X", "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar",
"K", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge", "As", "Se", "Br",
"Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te",
"I", "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm",
"Yb", "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn",
"Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr",
"15N", "14N", "Hex", "HexNAc", "dHex", "NeuAc", "Pent", "18O", "Hep", "NeuGc", "2H", "13C"]
| 101.625 | 120 | 0.306273 | [
"Apache-2.0"
] | pFindStudio/pDeep3 | pDeep/config/element.py | 813 | Python |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
import json
from .object_detection import build_model, download_dataset, optimize_model, benchmark_model
def test(test_config_path):
"""Runs an object detection test configuration
This runs an object detection test configuration. This involves
1. Download and build a model architecture (or use cached).
2. Optimize the model architecrue
3. Benchmark the optimized model against a dataset
4. (optional) Run assertions to check the benchmark output
The input to this function is a JSON file which specifies the test
configuration.
example_test_config.json:
{
"model_config": { ... },
"optimization_config": { ... },
"benchmark_config": { ... },
"assertions": [ ... ]
}
model_config: A dictionary of arguments passed to build_model, which
        specify the pre-optimized model architecture.  The model will be passed
to optimize_model.
optimization_config: A dictionary of arguments passed to optimize_model.
Please see help(optimize_model) for more details.
benchmark_config: A dictionary of arguments passed to benchmark_model.
Please see help(benchmark_model) for more details.
assertions: A list of strings containing python code that will be
evaluated. If the code returns false, an error will be thrown. These
assertions can reference any variables local to this 'test' function.
Some useful values are
statistics['map']
statistics['avg_latency']
statistics['avg_throughput']
Args
----
test_config_path: A string corresponding to the test configuration
JSON file.
"""
    with open(test_config_path, 'r') as f:
test_config = json.load(f)
print(json.dumps(test_config, sort_keys=True, indent=4))
frozen_graph = build_model(
**test_config['model_config'])
# optimize model using source model
frozen_graph = optimize_model(
frozen_graph,
**test_config['optimization_config'])
# benchmark optimized model
statistics = benchmark_model(
frozen_graph=frozen_graph,
**test_config['benchmark_config'])
# print some statistics to command line
print_statistics = statistics
if 'runtimes_ms' in print_statistics:
print_statistics.pop('runtimes_ms')
print(json.dumps(print_statistics, sort_keys=True, indent=4))
# run assertions
if 'assertions' in test_config:
for a in test_config['assertions']:
if not eval(a):
raise AssertionError('ASSERTION FAILED: %s' % a)
else:
print('ASSERTION PASSED: %s' % a)
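# Hypothetical example of the "assertions" entries documented above; the
# statistics keys come from the docstring, while the thresholds are
# illustrative placeholders, not values from a shipped test configuration.
EXAMPLE_ASSERTIONS = [
    "statistics['map'] > 0.27",
    "statistics['avg_latency'] < 100.0",
    "statistics['avg_throughput'] > 50.0",
]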
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'test_config_path',
help='Path of JSON file containing test configuration. Please'
'see help(tftrt.examples.object_detection.test) for more information')
args=parser.parse_args()
test(args.test_config_path)
| 36.188679 | 92 | 0.67049 | [
"Apache-2.0"
] | HubBucket-Team/tensorrt | tftrt/examples/object_detection/test.py | 3,836 | Python |
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test some utilities for working with JSON and PyMongo."""
import datetime
import json
import re
import sys
import uuid
sys.path[0:0] = [""]
from bson import json_util, EPOCH_AWARE, SON
from bson.json_util import (DatetimeRepresentation,
STRICT_JSON_OPTIONS)
from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE,
USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD)
from bson.code import Code
from bson.dbref import DBRef
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import FixedOffset, utc
from test import unittest, IntegrationTest
PY3 = sys.version_info[0] == 3
class TestJsonUtil(unittest.TestCase):
def round_tripped(self, doc, **kwargs):
return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs)
def round_trip(self, doc, **kwargs):
self.assertEqual(doc, self.round_tripped(doc, **kwargs))
def test_basic(self):
self.round_trip({"hello": "world"})
def test_json_options_with_options(self):
opts = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
opts.datetime_representation, DatetimeRepresentation.NUMBERLONG)
opts2 = opts.with_options(
datetime_representation=DatetimeRepresentation.ISO8601)
self.assertEqual(
opts2.datetime_representation, DatetimeRepresentation.ISO8601)
opts = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(opts.strict_number_long, True)
opts2 = opts.with_options(strict_number_long=False)
self.assertEqual(opts2.strict_number_long, False)
opts = json_util.CANONICAL_JSON_OPTIONS
self.assertNotEqual(
opts.uuid_representation, UuidRepresentation.JAVA_LEGACY)
opts2 = opts.with_options(
uuid_representation=UuidRepresentation.JAVA_LEGACY)
self.assertEqual(
opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts2.document_class, dict)
opts3 = opts2.with_options(document_class=SON)
self.assertEqual(
opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts3.document_class, SON)
def test_objectid(self):
self.round_trip({"id": ObjectId()})
def test_dbref(self):
self.round_trip({"ref": DBRef("foo", 5)})
self.round_trip({"ref": DBRef("foo", 5, "db")})
self.round_trip({"ref": DBRef("foo", ObjectId())})
# Check order.
self.assertEqual(
'{"$ref": "collection", "$id": 1, "$db": "db"}',
json_util.dumps(DBRef('collection', 1, 'db')))
def test_datetime(self):
# only millis, not micros
self.round_trip({"date": datetime.datetime(2009, 12, 9, 15,
49, 45, 191000, utc)})
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# No explicit offset
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# Localtime behind UTC
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# Localtime ahead of UTC
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc)
jsn = '{"dt": {"$date": -62135593139000}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
# Test dumps format
pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)}
post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)}
self.assertEqual(
'{"dt": {"$date": -62135593138990}}',
json_util.dumps(pre_epoch))
self.assertEqual(
'{"dt": {"$date": 63075661010}}',
json_util.dumps(post_epoch))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS))
number_long_options = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "63075661010"}}}',
json_util.dumps(post_epoch, json_options=number_long_options))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=number_long_options))
# ISO8601 mode assumes naive datetimes are UTC
pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)}
post_epoch_naive = {
"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)}
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch_naive,
json_options=STRICT_JSON_OPTIONS))
# Test tz_aware and tzinfo options
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}')["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=True,
tzinfo=utc))["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=False))["dt"])
self.round_trip(pre_epoch_naive, json_options=json_util.JSONOptions(
tz_aware=False))
# Test a non-utc timezone
pacific = FixedOffset(-8 * 60, 'US/Pacific')
aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000,
pacific)}
self.assertEqual(
'{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}',
json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
tz_aware=True, tzinfo=pacific))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.ISO8601,
tz_aware=True, tzinfo=pacific))
def test_regex_object_hook(self):
# Extended JSON format regular expression.
pat = 'a*b'
json_re = '{"$regex": "%s", "$options": "u"}' % pat
loaded = json_util.object_hook(json.loads(json_re))
self.assertTrue(isinstance(loaded, Regex))
self.assertEqual(pat, loaded.pattern)
self.assertEqual(re.U, loaded.flags)
def test_regex(self):
for regex_instance in (
re.compile("a*b", re.IGNORECASE),
Regex("a*b", re.IGNORECASE)):
res = self.round_tripped({"r": regex_instance})["r"]
self.assertEqual("a*b", res.pattern)
res = self.round_tripped({"r": Regex("a*b", re.IGNORECASE)})["r"]
self.assertEqual("a*b", res.pattern)
self.assertEqual(re.IGNORECASE, res.flags)
unicode_options = re.I|re.M|re.S|re.U|re.X
regex = re.compile("a*b", unicode_options)
res = self.round_tripped({"r": regex})["r"]
self.assertEqual(unicode_options, res.flags)
# Some tools may not add $options if no flags are set.
res = json_util.loads('{"r": {"$regex": "a*b"}}')['r']
self.assertEqual(0, res.flags)
self.assertEqual(
Regex('.*', 'ilm'),
json_util.loads(
'{"r": {"$regex": ".*", "$options": "ilm"}}')['r'])
# Check order.
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(Regex('.*', re.M | re.X)))
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(re.compile(b'.*', re.M | re.X)))
def test_minkey(self):
self.round_trip({"m": MinKey()})
def test_maxkey(self):
self.round_trip({"m": MaxKey()})
def test_timestamp(self):
dct = {"ts": Timestamp(4, 13)}
res = json_util.dumps(dct, default=json_util.default)
rtdct = json_util.loads(res)
self.assertEqual(dct, rtdct)
self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res)
def test_uuid(self):
doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')}
self.round_trip(doc)
self.assertEqual(
'{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}',
json_util.dumps(doc))
self.assertEqual(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}',
json_util.dumps(
doc, json_options=json_util.STRICT_JSON_OPTIONS))
self.assertEqual(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_util.dumps(
doc, json_options=json_util.JSONOptions(
strict_uuid=True, uuid_representation=STANDARD)))
self.assertEqual(
doc, json_util.loads(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}'))
for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) -
{UuidRepresentation.UNSPECIFIED}):
options = json_util.JSONOptions(
strict_uuid=True, uuid_representation=uuid_representation)
self.round_trip(doc, json_options=options)
# Ignore UUID representation when decoding BSON binary subtype 4.
self.assertEqual(doc, json_util.loads(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_options=options))
def test_uuid_uuid_rep_unspecified(self):
_uuid = uuid.uuid4()
options = json_util.JSONOptions(
strict_uuid=True,
uuid_representation=UuidRepresentation.UNSPECIFIED)
# Cannot directly encode native UUIDs with UNSPECIFIED.
doc = {'uuid': _uuid}
with self.assertRaises(ValueError):
json_util.dumps(doc, json_options=options)
# All UUID subtypes are decoded as Binary with UNSPECIFIED.
# subtype 3
doc = {'uuid': Binary(_uuid.bytes, subtype=3)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
# subtype 4
doc = {'uuid': Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
# $uuid-encoded fields
doc = {'uuid': Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps({'uuid': _uuid})
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
def test_binary(self):
if PY3:
bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"}
else:
bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")}
md5_type_dict = {
"md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac',
MD5_SUBTYPE)}
custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)}
self.round_trip(bin_type_dict)
self.round_trip(md5_type_dict)
self.round_trip(custom_type_dict)
# Binary with subtype 0 is decoded into bytes in Python 3.
bin = json_util.loads(
'{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin']
if PY3:
self.assertEqual(type(bin), bytes)
else:
self.assertEqual(type(bin), Binary)
# PYTHON-443 ensure old type formats are supported
json_bin_dump = json_util.dumps(bin_type_dict)
self.assertTrue('"$type": "00"' in json_bin_dump)
self.assertEqual(bin_type_dict,
json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}'))
json_bin_dump = json_util.dumps(md5_type_dict)
# Check order.
self.assertEqual(
'{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",'
+ ' "$type": "05"}}',
json_bin_dump)
self.assertEqual(md5_type_dict,
json_util.loads('{"md5": {"$type": 5, "$binary":'
' "IG43GK8JL9HRL4DK53HMrA=="}}'))
json_bin_dump = json_util.dumps(custom_type_dict)
self.assertTrue('"$type": "80"' in json_bin_dump)
self.assertEqual(custom_type_dict,
json_util.loads('{"custom": {"$type": 128, "$binary":'
' "aGVsbG8="}}'))
# Handle mongoexport where subtype >= 128
self.assertEqual(128,
json_util.loads('{"custom": {"$type": "ffffff80", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
self.assertEqual(255,
json_util.loads('{"custom": {"$type": "ffffffff", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
def test_code(self):
self.round_trip({"code": Code("function x() { return 1; }")})
code = Code("return z", z=2)
res = json_util.dumps(code)
self.assertEqual(code, json_util.loads(res))
# Check order.
self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res)
no_scope = Code('function() {}')
self.assertEqual(
'{"$code": "function() {}"}', json_util.dumps(no_scope))
def test_undefined(self):
jsn = '{"name": {"$undefined": true}}'
self.assertIsNone(json_util.loads(jsn)['name'])
def test_numberlong(self):
jsn = '{"weight": {"$numberLong": "65535"}}'
self.assertEqual(json_util.loads(jsn)['weight'],
Int64(65535))
self.assertEqual(json_util.dumps({"weight": Int64(65535)}),
'{"weight": 65535}')
json_options = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(json_util.dumps({"weight": Int64(65535)},
json_options=json_options),
jsn)
def test_loads_document_class(self):
# document_class dict should always work
self.assertEqual({"foo": "bar"}, json_util.loads(
'{"foo": "bar"}',
json_options=json_util.JSONOptions(document_class=dict)))
self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads(
'{"foo": "bar", "b": 1}',
json_options=json_util.JSONOptions(document_class=SON)))
class TestJsonUtilRoundtrip(IntegrationTest):
def test_cursor(self):
db = self.db
db.drop_collection("test")
docs = [
{'foo': [1, 2]},
{'bar': {'hello': 'world'}},
{'code': Code("function x() { return 1; }")},
{'bin': Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)},
{'dbref': {'_ref': DBRef('simple',
ObjectId('509b8db456c02c5ab7e63c34'))}}
]
db.test.insert_many(docs)
reloaded_docs = json_util.loads(json_util.dumps(db.test.find()))
for doc in docs:
self.assertTrue(doc in reloaded_docs)
if __name__ == "__main__":
unittest.main()
| 43.10643 | 79 | 0.581503 | [
"Apache-2.0"
] | Olegt0rr/mongo-python-driver | test/test_json_util.py | 19,441 | Python |
#! -*- coding: utf-8 -*-
#
# (C) 2013 Internet Initiative Japan Inc.
# All rights reserved.
#
# Created on 2013/05/15
# @author: [email protected]
"""Notify project owner with email when the project created successfully."""
from pkg_resources import resource_filename
from trac.config import Option, ListOption
from trac.core import Component, implements
from trac.notification import Notify, NotifyEmail
from trac.web.chrome import ITemplateProvider
from tracportal.i18n import _
from tracportal.project.api import IProjectCreationInterceptor
class ProjectCreationNotificationSystem(Component):
implements(ITemplateProvider, IProjectCreationInterceptor)
# options
from_name = Option('tracportal', 'notify_email_from_name', doc=_('Sender name to use in notification emails.'))
from_email = Option('tracportal', 'notify_email_from', doc=_('Sender address to use in notification emails.'))
ccrcpts = ListOption('tracportal', 'notify_email_cc',
doc=_('Email address(es) to always send notifications to, '
'addresses can be seen by all recipients (Cc:).'))
subject = Option('tracportal', 'notify_email_subject', default=_("Ready to start Trac project!"),
doc=_('Subject in notification emails.'))
# ITemplateProvider methods
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return []
# IProjectCreationInterceptor methods
def pre_process(self, project_info, owner_info):
pass
def post_process(self, project_info, owner_info, env):
if 'email' in owner_info:
project_info['url'] = env.abs_href()
support = {
'name': self.from_name or self.env.project_name,
'email': self.from_email or self.env.config.get('notification', 'smtp_from'),
}
notify_email = ProjectCreationNotifyEmail(self.env, (owner_info['email'],), tuple(self.ccrcpts),
project_info, owner_info, support)
notify_email.notify('')
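# Example trac.ini snippet wiring up the options declared above (the sender
# name and addresses are placeholders):
#
#   [tracportal]
#   notify_email_from_name = Portal Support
#   notify_email_from = [email protected]
#   notify_email_cc = [email protected]
#   notify_email_subject = Ready to start Trac project!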
class ProjectCreationNotifyEmail(NotifyEmail):
"""Notification of a project creation."""
template_name = 'project_creation_notify_email.txt'
def __init__(self, env, torcpts, ccrcpts, project_info, owner_info, support):
NotifyEmail.__init__(self, env)
self.torcpts = torcpts
self.ccrcpts = ccrcpts
self.project_info = project_info
self.owner_info = owner_info
self.support = support
self.subject = self.subject
def get_recipients(self, resid):
return (self.torcpts, self.ccrcpts,)
def notify(self, resid, subject=None, author=None):
if subject:
self.subject = subject
self.from_name = self.support['name']
self.from_email = self.support['email']
self.replyto_email = self.support['email']
if self.data is None:
self.data = {}
self.data.update({
'owner': self.owner_info,
'project': self.project_info,
'support': self.support,
})
Notify.notify(self, resid)
| 37.523256 | 115 | 0.654478 | [
"MIT"
] | iij/TracPortalPlugin | tracportalopt/project/notification.py | 3,227 | Python |
# -*- coding: utf-8 -*-
#
# python_example documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 26 00:29:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyBAScloudAPI'
copyright = u'2021, ProFM Facility & Project Management GmbH'
author = u'ProFM Facility & Project Management GmbH'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.0'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyBAScloudAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyBAScloudAPI.tex', u'pyBAScloudAPI Documentation',
u'ProFM Facility & Project Management GmbH', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyBAScloudAPI', u'pyBAScloudAPI Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyBAScloudAPI', u'pyBAScloudAPI Documentation',
author, 'pyBAScloudAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 32.511864 | 79 | 0.719737 | [
"MIT"
] | bascloud/BASCloudAPI | pyBAScloudAPI/docs/conf.py | 9,591 | Python |
from ipdb import set_trace as st
from icecream import ic
import gc
import os
import wandb
import pandas as pd
from fastprogress import progress_bar
from loguru import logger
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import utils as U
import configuration as C
import result_handler as rh
from criterion import mixup_criterion
from early_stopping import EarlyStopping
def train_cv(config):
# config
debug = config['globals']['debug']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_fold = config['split']['n_fold']
n_epoch = config['globals']['num_epochs']
path_trn_tp = config['path']['path_train_tp']
n_classes = config['model']['params']['n_classes']
dir_save_exp, dir_save_ignore_exp, _ = U.get_save_dir_exp(config)
# load data
pwd = os.path.dirname(os.path.abspath(__file__))
trn_tp = pd.read_csv(f'{pwd}/{path_trn_tp}')
# init
acc_val_folds = []
lwlrap_val_folds = []
if debug:
oof_sig = np.zeros([n_classes*n_fold, n_classes])
else:
oof_sig = np.zeros([len(trn_tp), n_classes])
for i_fold in progress_bar(range(n_fold)):
# logger
logger.info("-" * 18)
logger.info(f'\tFold {i_fold + 1}/{n_fold}')
logger.info("-" * 18)
# preparation
model = C.get_model(config).to(device)
criterion = C.get_criterion(config)
optimizer = C.get_optimizer(model, config)
scheduler = C.get_scheduler(optimizer, config)
_, _, exp_name = U.get_save_dir_exp(config)
# wandb
wb_fold = wandb.init(project='kaggle-rfcx',
group=exp_name,
name=f'fold{i_fold}')
wb_fold.config.config = config
epochs = []
losses_trn = []
losses_val = []
accs_val = []
lwlraps_val = []
best_acc_val = 0
best_lwlrap_val = 0
best_loss_val = 0
best_output_sig = 0
save_path = f'{dir_save_ignore_exp}/'\
f'{model.__class__.__name__}_fold{i_fold}.pth'
early_stopping = EarlyStopping(patience=12,
verbose=True,
path=save_path,
trace_func=logger.info)
for epoch in range(1, n_epoch+1):
            # run training for this fold/epoch
result_dict = train_fold(i_fold, trn_tp, model,
criterion, optimizer,
scheduler, config)
val_idxs = result_dict['val_idxs']
output_sig = result_dict['output_sig']
loss_trn = result_dict['loss_trn']
loss_val = result_dict['loss_val']
acc_val = result_dict['acc_val']
lwlrap_val = result_dict['lwlrap_val']
logger.info(f'[fold({i_fold+1})epoch({epoch})]'
f'loss_trn={loss_trn:.6f} '
f'loss_val={loss_val:.6f} '
f'acc_val={acc_val:.6f} '
f'lwlrap_val={lwlrap_val:.6f}')
wb_fold.log({'epoch': epoch,
'loss_trn': loss_trn,
'loss_val': loss_val,
'acc_val': acc_val,
'lwlrap_val': lwlrap_val})
            # store per-epoch metrics
epochs.append(int(epoch))
losses_trn.append(loss_trn)
losses_val.append(loss_val)
accs_val.append(acc_val)
lwlraps_val.append(lwlrap_val)
# best model ?
is_update = early_stopping(loss_val, result_dict['model'], debug)
if is_update:
best_loss_val = loss_val
best_acc_val = acc_val
best_lwlrap_val = lwlrap_val
best_output_sig = output_sig
wb_fold.summary['loss_val'] = best_loss_val
wb_fold.summary['acc_val'] = best_acc_val
wb_fold.summary['lwlrap_val'] = best_lwlrap_val
if early_stopping.early_stop:
logger.info("Early stopping")
break
wb_fold.finish()
# result
rh.save_plot_figure(i_fold, epochs, losses_trn, accs_val, lwlraps_val,
losses_val, dir_save_exp)
rh.save_result_csv(i_fold, best_loss_val, best_acc_val, best_lwlrap_val,
dir_save_exp, config)
# --- fold end ---
# oof_sig
acc_val_folds.append(best_acc_val)
lwlrap_val_folds.append(best_lwlrap_val)
if debug:
oof_sig[i_fold*n_classes:(i_fold+1)*n_classes] = best_output_sig
else:
oof_sig[val_idxs, :] = best_output_sig
logger.info(f'best_loss_val: {best_loss_val:.6f}, '
f'best_acc_val: {best_acc_val:.6f}, '
f'best_lwlrap_val: {best_lwlrap_val:.6f}')
oof = np.argmax(oof_sig, axis=1)
oof_sig = torch.tensor(oof_sig)
labels = np.zeros([len(oof), 24], dtype=int)
if debug:
        # use a dummy label as the ground truth
labels[:, 0] = 1
labels = torch.tensor(labels)
acc_oof = accuracy_score(np.zeros(len(oof)), oof)
lwlrap_oof = U.LWLRAP(oof_sig, labels)
else:
for i_id, id_ in enumerate(trn_tp['species_id'].values):
labels[i_id][id_] = 1
labels = torch.tensor(labels)
acc_oof = accuracy_score(trn_tp['species_id'].values, oof)
lwlrap_oof = U.LWLRAP(oof_sig, labels)
# acc_val_folds
acc_val_folds_mean = np.mean(acc_val_folds)
acc_val_folds_std = np.std(acc_val_folds)
logger.info(f'acc_folds(mean, std): '
f'{acc_val_folds_mean:.6f} +- {acc_val_folds_std:6f}')
logger.info(f'acc_oof: {acc_oof:6f}')
# lwlrap_val_folds
lwlrap_val_folds_mean = np.mean(lwlrap_val_folds)
lwlrap_val_folds_std = np.std(lwlrap_val_folds)
logger.info(f'lwlrap_folds(mean, std): '
f'{lwlrap_val_folds_mean:.6f} +- {lwlrap_val_folds_std:6f}')
logger.info(f'lwlrap_oof: {lwlrap_oof:6f}')
# wandb
wb_summary = wandb.init(project='kaggle-rfcx',
group=exp_name,
name='summary')
wb_summary.config.config = config
wb_summary.log({'acc_val_folds_mean': acc_val_folds_mean,
'acc_val_folds_std': acc_val_folds_std,
'acc_oof': acc_oof,
'lwlrap_val_folds_mean': lwlrap_val_folds_mean,
'lwlrap_val_folds_std': lwlrap_val_folds_std,
'lwlrap_oof': lwlrap_oof})
wb_summary.finish()
    # free memory
del result_dict
del model
del optimizer
del scheduler
gc.collect()
torch.cuda.empty_cache()
def train_fold(i_fold, trn_tp, model,
criterion, optimizer,
scheduler, config):
mixup = config['globals']['mixup']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trn_idxs, val_idxs = C.get_index_fold(trn_tp, i_fold, config)
trn_tp_trn = trn_tp.iloc[trn_idxs].reset_index(drop=True)
trn_tp_val = trn_tp.iloc[val_idxs].reset_index(drop=True)
trn_loader = C.get_trn_val_loader(trn_tp_trn, 'train', config)
val_loader = C.get_trn_val_loader(trn_tp_val, 'valid', config)
# train
model.train()
epoch_train_loss = 0
for batch_idx, (data, target) in enumerate(trn_loader):
data, target = data.to(device), target.to(device)
if mixup:
data, targets_a, targets_b, lam = U.mixup_data(data,
target,
alpha=1.0)
optimizer.zero_grad()
output = model(data)
if mixup:
loss = mixup_criterion(criterion, output,
targets_a, targets_b, lam)
else:
loss = criterion(output, target)
loss.backward()
optimizer.step()
epoch_train_loss += loss.item()*data.size(0)
scheduler.step()
loss_trn = epoch_train_loss / len(trn_loader.dataset)
del data
# eval valid
loss_val, acc_val, lwlrap_val, output_sig = get_loss_score(model,
val_loader,
criterion,
device)
result_dict = {
'model': model,
'val_idxs': val_idxs,
'output_sig': output_sig,
'loss_trn': loss_trn,
'loss_val': loss_val,
'acc_val': acc_val,
'lwlrap_val': lwlrap_val
}
return result_dict
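# Note on mixup: mixup_criterion (imported from criterion.py above) is conventionally a convex
# combination of the two losses. A minimal sketch under that assumption -- the project's actual
# implementation may differ:
#     def mixup_criterion(criterion, pred, y_a, y_b, lam):
#         return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)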
def get_loss_score(model, val_loader, criterion, device):
model.eval()
epoch_valid_loss = 0
y_pred_list = []
y_true_list = []
output_sig_list = []
lwlrap_val = 0
for batch_idx, (data, target) in enumerate(val_loader):
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
epoch_valid_loss += loss.item()*data.size(0)
output_ = output['output']
output_sig = output['output_sigmoid']
output_sig = output_sig.detach().cpu().numpy()
_y_pred = output_.detach().cpu().numpy().argmax(axis=1)
_y_true = target.detach().cpu().numpy().argmax(axis=1)
y_pred_list.append(_y_pred)
y_true_list.append(_y_true)
output_sig_list.append(output_sig)
lwlrap_val += U.LWLRAP(output_, target) / len(val_loader)
loss_val = epoch_valid_loss / len(val_loader.dataset)
y_pred = np.concatenate(y_pred_list, axis=0)
y_true = np.concatenate(y_true_list, axis=0)
output_sig = np.concatenate(output_sig_list, axis=0)
acc_val = accuracy_score(y_true, y_pred)
del data
return loss_val, acc_val, lwlrap_val, output_sig
| 36.870849 | 80 | 0.572258 | [
"MIT"
] | fkubota/kaggle-Rainforest-Connection-Species-Audio-Detection | exp/exp002/trainner.py | 10,030 | Python |
# Generated by Django 2.2.6 on 2019-12-28 22:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FoodStore', '0002_auto_20191209_0246'),
]
operations = [
migrations.AddField(
model_name='foodhomepagemodel',
name='PageComplete',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='fullmenupagemodel',
name='PageComplete',
field=models.BooleanField(default=False),
),
]
| 24.041667 | 53 | 0.601386 | [
"MIT"
] | CPU-sangoma/PlentyPot | FoodStore/migrations/0003_auto_20191229_0057.py | 577 | Python |
#!/usr/bin/python
import os
import string
call = " hook(10);\n";
call = " hook(10); hook2(10);hook3(0);hook4(0);\n";
def run_test(num_calls, compiler_command):
f = open("program_options_test.cpp", "w")
f.write("""#include <boost/program_options.hpp>
using namespace boost::program_options;
void do_it()
{
boost::program_options::options_description desc;
desc.add_options()
""")
for i in range(0, num_calls):
f.write("(\"opt%d\", value<int>())\n")
f.write(";\n}\n")
f.close()
os.system(compiler_command + " -c -save-temps -I /home/ghost/Work/boost-rc program_options_test.cpp")
nm = os.popen("nm -S program_options_test.o")
for l in nm:
if string.find(l, "Z5do_itv") != -1:
break
size = int(string.split(l)[1], 16)
return size
def run_tests(range, compiler_command):
last_size = None
first_size = None
for num in range:
size = run_test(num, compiler_command)
if last_size:
print "%2d calls: %5d bytes (+ %d)" % (num, size, size-last_size)
else:
print "%2d calls: %5d bytes" % (num, size)
first_size = size
last_size = size
print "Avarage: ", (last_size-first_size)/(range[-1]-range[0])
if __name__ == '__main__':
for compiler in [ "g++-3.3 -Os", "g++-3.3 -O3", "g++-3.4 -Os", "g++-3.4 -O3"]:
print "****", compiler, "****"
run_tests(range(1, 20), compiler)
| 26.203704 | 104 | 0.601413 | [
"MIT"
] | angel2230/-tlbb | Game/boost/libs/program_options/test/program_options_size_test.py | 1,415 | Python |
import cv2
import numpy as np
from PIL import Image
def draw_approx_polyDP(cnt, epsilon=0.01, closed=True):
"""用多边形来近似的表示曲线"""
epsilon = epsilon * cv2.arcLength(cnt, closed) # 得到轮廓的周长信息作为参考值
return cv2.approxPolyDP(cnt, epsilon, closed) # 得到近似多边形框
def draw_convex_hull(cnt):
"""画凸包,传入的是一些点"""
return cv2.convexHull(cnt) # 获取处理过的轮廓信息
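# A minimal usage sketch for the two helpers above (assumptions: 'example.jpg' is a hypothetical
# image path and OpenCV 4.x's two-value findContours return signature is used). Illustrative only.
def _example_approx_and_hull(image_path='example.jpg'):
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = max(contours, key=cv2.contourArea)  # largest contour by area
    approx = draw_approx_polyDP(cnt)  # simplified outline
    hull = draw_convex_hull(cnt)  # convex hull of the same contour
    cv2.drawContours(img, [approx], -1, (0, 255, 0), 2)
    cv2.drawContours(img, [hull], -1, (0, 0, 255), 2)
    return img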
def show_img(file_name, window_name='win'):
img = cv2.imread(file_name)
cv2.imshow(window_name, img)
    # press any key to close the window
cv2.waitKey()
cv2.destroyAllWindows()
def camera_show(window_name='camera'):
"""最好在改进一下关闭窗口部分的功能
建立一个窗口捕捉摄像头显示的内容
当左键点击过窗口,且按过任意键盘键,才会退出窗口"""
clicked = False
camera_capture = cv2.VideoCapture(0)
def on_mouse(event, x, y, flags, param):
        nonlocal clicked
if event == cv2.EVENT_LBUTTONUP:
clicked = True
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, on_mouse)
success, frame = camera_capture.read()
    # cv2.waitKey(1) waits up to 1 ms for a key press and returns -1 if no key was pressed
while success and cv2.waitKey(1) == -1 and not clicked:
cv2.imshow(window_name, frame)
success, frame = camera_capture.read()
cv2.destroyAllWindows()
camera_capture.release()
def camera_save(file_name, seconds=3, fps=60):
    # open the capture device
camera_capture = cv2.VideoCapture(0)
size = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
video_writer = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)
success, frame = camera_capture.read()
num_frames_remaining = seconds * fps - 1
while success and num_frames_remaining > 0:
video_writer.write(frame)
success, frame = camera_capture.read()
num_frames_remaining -= 1
camera_capture.release()
def copy(orig_img, start_height, start_width, part):
height, width = part.shape
orig_img[start_height: start_height + height, start_width: start_width + width] = part
return orig_img
def draw_gray_random(height, width):
flat_numpy_array = np.random.randint(0, 256, height * width)
gray_image = flat_numpy_array.reshape(height, width)
return gray_image
def draw_random(height, width, channel=3):
flat_numpy_array = np.random.randint(0, 256, height * width * channel)
bgr_image = flat_numpy_array.reshape((height, width, channel))
return bgr_image
def draw_gray_black(height, width):
img = np.zeros((height, width), dtype=np.uint8)
return img
def draw_line(img, x1, y1, x2, y2, color=(0, 255, 0), thickness=2):
return cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_rectangle(img, box, contour_idx=0, color=(0, 0, 255), thickness=3):
return cv2.drawContours(img, box, contour_idx, color, thickness)
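# Usage sketch for draw_rectangle (assumption: `cnt` is a contour obtained from cv2.findContours):
#     rect = cv2.minAreaRect(cnt)
#     box = cv2.boxPoints(rect).astype(int)
#     img = draw_rectangle(img, [box])  # cv2.drawContours expects a list of point arrays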
def draw_cicile(img, center, radius, color=(0, 255, 0), thickness=2):
return cv2.circle(img, center, radius, color, thickness)
def draw_black(height, width):
    img = draw_gray_black(height, width)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def img2array(img):
return bytearray(img)
def array_img(arr, height, width, channel=3):
return np.array(arr).reshape(height, width, channel)
def array2img_gray(arr, height, width):
return np.array(arr).reshape(height, width)
if __name__ == '__main__':
img = cv2.imread('sphere.png')
cv2.imshow('win', img)
# empire = Image.open('sphere.png')
# cv2.waitKey()
# cv2.destroyAllWindows()
# print(empire.shape())
# empire.convert('RGB')
# print(empire.mode)
# print(empire.shape())
img = Image.open('sphere.png')
img = img.resize((137, 137))
    # make the black pixels transparent
print(img.info)
print(img.mode)
img = img.convert("RGBA")
print(img.mode)
width = img.size[0]
height = img.size[1]
for x in range(width):
for y in range(height):
r, g, b, a = img.getpixel((x, y))
rgba = (r, g, b, a)
if (r == g == b == 0):
img.putpixel((x, y), (0, 0, 0, 0))
img.save('sphere_2.png')
img.show()
| 27.904762 | 100 | 0.661628 | [
"MIT"
] | strawsyz/straw | my_cv/utils/cv2_util.py | 4,420 | Python |
try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
urlpatterns = patterns("rush_forms.views",
#url(r"^(?P<pk>\d+)/$", 'form_view', name='form-detail'),
url(r"^(?P<pk>\d+)/$", 'rush_form_view', name='detail'),
url(r"^(?P<pk>\d+)/(?P<user_id>\d+)/$", 'rush_form_user_entry_view', name='user_entry'),
)
| 42 | 111 | 0.512987 | [
"BSD-3-Clause"
] | goldhand/onegreek | onegreek/rush_forms/urls.py | 462 | Python |
import logging
import Queue
import sia_client as sc
logger = logging.getLogger(__name__)
def from_upload_jobs(upload_jobs):
"""Creates a new upload queue from a list of upload jobs.
Creates a new queue of files to upload by starting with the full input
dataset and removing any files that are uploaded (partially or fully) to
Sia.
Args:
upload_jobs: The unfiltered set of upload jobs.
Returns:
A Queue of upload jobs, filtered to remove jobs that are already
complete (the paths already exist on Sia).
"""
return from_upload_jobs_and_sia_client(upload_jobs, sc.make_sia_client())
def from_upload_jobs_and_sia_client(upload_jobs, sia_client):
"""Creates a new upload queue from a dataset.
Creates a new queue of files to upload by starting with the full input
dataset and removing any files that are uploaded (partially or fully) to
Sia.
Args:
upload_jobs: The unfiltered set of upload jobs.
sia_client: An implementation of the Sia client interface.
Returns:
A Queue of upload jobs, filtered to remove jobs that are already
complete (the paths already exist on Sia).
"""
sia_paths = _get_sia_paths(sia_client)
# Filter jobs for files that have already been uploaded to Sia.
upload_jobs = [j for j in upload_jobs if j.sia_path not in sia_paths]
logger.info('%d files already uploaded to Sia, need to upload %d more',
len(sia_paths), len(upload_jobs))
upload_queue = Queue.Queue()
for upload_job in upload_jobs:
upload_queue.put(upload_job)
return upload_queue
def _get_sia_paths(sia_client):
return set([f[u'siapath'] for f in sia_client.renter_files()])
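# A minimal usage sketch (illustrative only; the fake job/client classes below are assumptions,
# not part of the real codebase): one of two upload jobs is filtered out because its path is
# already present on Sia.
def _example_usage():
    class FakeJob(object):
        def __init__(self, sia_path):
            self.sia_path = sia_path
    class FakeSiaClient(object):
        def renter_files(self):
            return [{u'siapath': u'photos/a.jpg'}]
    jobs = [FakeJob(u'photos/a.jpg'), FakeJob(u'photos/b.jpg')]
    queue = from_upload_jobs_and_sia_client(jobs, FakeSiaClient())
    return queue.qsize()  # 1 -- only photos/b.jpg still needs uploading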
| 32.222222 | 77 | 0.711494 | [
"MIT"
] | mtlynch/sia_load_tester | sia_load_tester/upload_queue.py | 1,740 | Python |
# ---------------------------------------------------------------------
# Vendor: DCN
# OS: DCWS
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "DCN.DCWS.get_version"
cache = True
interface = IGetVersion
rx_platform = re.compile(r"\s*(?P<platform>\S+)\s+Device.", re.MULTILINE)
rx_ver = re.compile(r"^\s*Soft[Ww]are\s+Version\s+(?P<version>\S+)\n", re.MULTILINE)
rx_bver = re.compile(r"^\s*Boot[Rr]om\s+Version\s+(?P<bversion>\S+)\n", re.MULTILINE)
rx_hver = re.compile(r"^\s*Hard[Ww]are\s+Version\s+(?P<hversion>\S+)\n", re.MULTILINE)
rx_serial = re.compile(r"^\s*Serial\s+No\s+(?P<serial>\S+)\n", re.MULTILINE)
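    # Illustrative only -- hypothetical "show version" lines of the shape the regexes above expect
    # (not captured from a real device):
    #     DCWS-6028 Device.
    #     SoftWare Version 7.0.3.5
    #     BootRom Version 1.0.1
    #     HardWare Version 1.0
    #     Serial No ABC0123456789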
def execute(self):
ver = self.cli("show version", cached=True)
match = self.re_search(self.rx_platform, ver)
vmatch = self.re_search(self.rx_ver, ver)
bmatch = self.re_search(self.rx_bver, ver)
hmatch = self.re_search(self.rx_hver, ver)
smatch = self.re_search(self.rx_serial, ver)
return {
"vendor": "DCN",
"platform": match.group("platform"),
"version": vmatch.group("version"),
"attributes": {
"Bootrom version": bmatch.group("bversion"),
"HW version": hmatch.group("hversion"),
"Serial Number": smatch.group("serial"),
},
}
| 37.688889 | 90 | 0.533019 | [
"BSD-3-Clause"
] | nocproject/noc | sa/profiles/DCN/DCWS/get_version.py | 1,696 | Python |
"""django1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('blog/', include('blog.urls')),
path('polls/', include('polls.urls')),
path('portfolio', include('portfolio.urls')),
path('admin/', admin.site.urls)
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 38.333333 | 82 | 0.721739 | [
"MIT"
] | ShiroDevC/Old_Code | python/Django/django1/urls.py | 1,150 | Python |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for reading and generating Lammps inputset.
For the ease of management we divide LAMMPS input into 2 files:
1.Data file: All structure related settings such as the atomic positions,
bonds, angles, dihedrals, corresponding parametrizations etc are
set in the data file.
2. Control/input file: This is the main input file that should be fed to the
lammps binary. The main input file consists of the path to the
afore-mentioned data file and the job control parameters such as
the ensemble type(NVT, NPT etc), max number of iterations etc.
"""
import os
import six
from monty.json import MSONable, MontyDecoder
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.input import LammpsInput
__author__ = "Kiran Mathew"
__email__ = "[email protected]"
class LammpsInputSet(MSONable):
def __init__(self, name, lammps_input, lammps_data=None,
data_filename="in.data", user_lammps_settings=None):
"""
Implementation of LammpsInputSet that is initialized from a dict
settings. It is typically used by other LammpsInputSets for
initialization from json or yaml source files.
Args:
name (str): A name for the input set.
            lammps_input (LammpsInput): The LammpsInput object to use.
lammps_data (LammpsData): LammpsData object
            data_filename (str): name of the lammps data file.
Note: this will override the value for 'data_file' key in lammps_input
user_lammps_settings (dict): User lammps settings. This allows a user
to override lammps settings, e.g., setting a different force field
or bond type.
"""
self.name = name
self.lines = []
self.lammps_input = lammps_input
self.lammps_data = lammps_data
self.data_filename = data_filename
self.lammps_input.settings["data_file"] = data_filename
self.user_lammps_settings = user_lammps_settings or {}
self.lammps_input.settings.update(self.user_lammps_settings)
def write_input(self, input_filename, data_filename=None):
"""
Get the string representation of the main input file and write it.
Also writes the data file if the lammps_data attribute is set.
Args:
input_filename (string): name of the input file
data_filename (string): override the data file name with this
"""
if data_filename:
data_filename = os.path.abspath(os.path.join(os.getcwd(), data_filename))
if data_filename and ("data_file" in self.lammps_input.settings):
self.lammps_input.settings["data_file"] = data_filename
self.data_filename = data_filename
self.lammps_input.write_file(input_filename)
# write the data file if present
if self.lammps_data:
self.lammps_data.write_file(filename=self.data_filename)
@classmethod
def from_file(cls, name, input_template, user_settings,
lammps_data=None, data_filename="in.data"):
"""
Returns LammpsInputSet from input file template and input data.
Args:
name (str)
input_template (string): path to the input template file.
user_settings (dict): User lammps settings, the keys must
correspond to the keys in the template.
lammps_data (string/LammpsData): path to the
data file or an appropriate object
            data_filename (string): name of the lammps data file.
Returns:
LammpsInputSet
"""
user_settings["data_file"] = data_filename
lammps_input = LammpsInput.from_file(input_template, user_settings)
if isinstance(lammps_data, six.string_types):
lammps_data = LammpsData.from_file(lammps_data)
return cls(name, lammps_input, lammps_data=lammps_data,
data_filename=data_filename)
def as_dict(self):
d = MSONable.as_dict(self)
if hasattr(self, "kwargs"):
d.update(**self.kwargs)
d["lammps_input"] = self.lammps_input.as_dict()
return d
@classmethod
def from_dict(cls, d):
decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items()
if k not in ["@module", "@class", "lammps_input"]}
decoded["lammps_input"] = LammpsInput.from_dict(d["lammps_input"])
return cls(**decoded)
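# A minimal usage sketch (the file names below are hypothetical placeholders, not shipped templates):
# build an input set from a template plus a data file, then write both files out.
def _example_usage():
    user_settings = {"temperature": 300, "nsteps": 10000}  # keys must match the template
    input_set = LammpsInputSet.from_file("npt_run", "in.template", user_settings,
                                         lammps_data="system.data", data_filename="in.data")
    input_set.write_input("in.npt_run")  # also writes "in.data" because lammps_data was given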
| 40.210084 | 86 | 0.660815 | [
"MIT"
] | JSelf42/pymatgen | pymatgen/io/lammps/sets.py | 4,785 | Python |
# coding=utf-8
# Examples for understanding
"""nome = input('Qual seu nome?' )
if nome == 'Rodrigo' or nome == 'RAYANNE':
print('Que nome lindo vocé tem!')
else:
print('Que nome tão normal!!!')
print('Bom dia, {}'.format(nome))"""
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2) / 2
print('A sua média foi: {:.1f}'.format(m))
print('A sua media foi boa!' if m >= 6.0 else 'Sua media foi ruim,estude mais!')
"""if m >= 6.0:
print('Sua média foi boa!')
else:
print('A sua média foi ruim,estude mais!')"""
| 28.9 | 80 | 0.615917 | [
"MIT"
] | rodrigobarbonifilho/Python | Python Aulas/Mundo 1/Aula 010c.py | 583 | Python |
"""
Read and plot a PPI from raw mini-MPL data
------------------------------------------
Example of how to read in raw data from the mini-MPL
and plot out the PPI by converting it to PyART
Author: Adam Theisen
"""
from matplotlib import pyplot as plt
import act
try:
import pyart
PYART_AVAILABLE = True
except ImportError:
PYART_AVAILABLE = False
# Read in sample mini-MPL data
files = act.tests.sample_files.EXAMPLE_SIGMA_MPLV5
obj = act.io.mpl.read_sigma_mplv5(files)
# Create a PyART Radar Object
radar = act.utils.create_pyart_obj(
obj, azimuth='azimuth_angle', elevation='elevation_angle', range_var='range'
)
# Create plot display
if PYART_AVAILABLE:
display = pyart.graph.RadarDisplay(radar)
display.plot('nrb_copol', sweep=0, title_flag=False, vmin=0, vmax=1.0, cmap='jet')
plt.show()
| 22.459459 | 86 | 0.701564 | [
"BSD-3-Clause"
] | ANL-DIGR/ACT | examples/plot_raw_minimpl.py | 831 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for linear regression example under TensorFlow eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def device():
return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"
class LinearRegressionTest(tf.test.TestCase):
def setUp(self):
super(LinearRegressionTest, self).setUp()
self._tmp_logdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmp_logdir)
super(LinearRegressionTest, self).tearDown()
def testSyntheticDataset(self):
true_w = tf.random_uniform([3, 1])
true_b = [1.0]
batch_size = 10
num_batches = 2
noise_level = 0.
dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
batch_size, num_batches)
it = tfe.Iterator(dataset)
for _ in range(2):
(xs, ys) = it.next()
self.assertEqual((batch_size, 3), xs.shape)
self.assertEqual((batch_size, 1), ys.shape)
self.assertEqual(tf.float32, xs.dtype)
self.assertEqual(tf.float32, ys.dtype)
with self.assertRaises(StopIteration):
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkEagerLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
burn_in_dataset = dataset.take(10)
model = linear_regression.LinearModel()
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Perform burn-in.
linear_regression.fit(model, burn_in_dataset, optimizer)
start_time = time.time()
for _ in range(num_epochs):
linear_regression.fit(model, dataset, optimizer)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="eager_train_%s" %
("gpu" if tfe.num_gpus() > 0 else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
| 33.52459 | 89 | 0.666504 | [
"Apache-2.0"
] | uve/tensorflow | tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py | 4,090 | Python |
from office365.graph.graph_client import GraphClient
from settings import settings
def get_token(auth_ctx):
"""Acquire token via client credential flow (ADAL Python library is utilized)"""
token = auth_ctx.acquire_token_with_client_credentials(
"https://graph.microsoft.com",
settings['client_credentials']['client_id'],
settings['client_credentials']['client_secret'])
return token
client = GraphClient(settings['tenant'], get_token)
message_json = {
"Message": {
"Subject": "Meet for lunch?",
"Body": {
"ContentType": "Text",
"Content": "The new cafeteria is open."
},
"ToRecipients": [
{
"EmailAddress": {
"Address": "[email protected]"
}
}
]
},
"SaveToSentItems": "false"
}
login_name = "[email protected]"
client.users[login_name].send_mail(message_json)
client.execute_query()
| 27.861111 | 84 | 0.612164 | [
"MIT"
] | stardust85/Office365-REST-Python-Client | examples/outlook/send_message.py | 1,003 | Python |
import tensorflow as tf
import cv2
import numpy as np
import os
from sklearn.model_selection import train_test_split
import random
import sys
my_image_path = 'my_face'
others_image_path = 'other_people'
image_data = []
label_data = []
def get_padding_size(image):
#def get_padding_size(image):
    h, w, _ = image.shape # height, width and number of channels
longest_edge = max(h, w)
top, bottom, left, right = (0, 0, 0, 0)
    if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
    elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
return top, bottom, left, right #(0,0,0,0)
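# Worked example for get_padding_size: a 100x60 (h x w) image has longest_edge=100, so it returns
# top=0, bottom=0, left=20, right=20 -- only the short side is padded before the square resize below.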
# os.listdir(path): path is the directory to read; returns all entries in that directory.
def read_data(img_path, image_h=64, image_w=64):
for filename in os.listdir(img_path):
if filename.endswith('.jpg'):
filepath = os.path.join(img_path, filename)
image = cv2.imread(filepath)
top, bottom, left, right = get_padding_size(image)
image_pad = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
image = cv2.resize(image_pad, (image_h, image_w))
image_data.append(image)
label_data.append(img_path)
read_data(others_image_path)
read_data(my_image_path)
image_data = np.array(image_data)
label_data = np.array([[0,1] if label == my_image_path else [1,0] for label in label_data])
# train_test_split randomly splits the samples into train and test sets; test_size is the test
# fraction (an absolute count if given as an integer) and random_state seeds the shuffle.
train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05, random_state=random.randint(0, 100))
# image (height=64 width=64 channel=3)
train_x = train_x.reshape(train_x.shape[0], 64, 64, 3)
test_x = test_x.reshape(test_x.shape[0], 64, 64, 3)
# nomalize
train_x = train_x.astype('float32') / 255.0
test_x = test_x.astype('float32') / 255.0
print(len(train_x), len(train_y))
print(len(test_x), len(test_y))
#############################################################
#batch_size = 128
batch_size = 64
num_batch = len(train_x) // batch_size
# tf.placeholder(): a placeholder tensor whose value is supplied to session.run() through feed_dict.
X = tf.placeholder(tf.float32, [None, 64, 64, 3]) # image size 64x64, 3 channels
Y = tf.placeholder(tf.float32, [None, 2])
keep_prob_5 = tf.placeholder(tf.float32)
keep_prob_75 = tf.placeholder(tf.float32)
def panda_joke_cnn():
W_c1 = tf.Variable(tf.random_normal([3, 3, 3, 32], stddev=0.01))
b_c1 = tf.Variable(tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, W_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, keep_prob_5)
    # per block: convolution (W*X), add bias, ReLU, max-pool, then dropout
    # Dropout randomly deactivates some hidden units during training; the deactivated units are
    # temporarily treated as absent from the network, but their weights are kept (just not updated),
    # so they may become active again on the next batch.
W_c2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
b_c2 = tf.Variable(tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, W_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, keep_prob_5)
W_c3 = tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01))
b_c3 = tf.Variable(tf.random_normal([64]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, W_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, keep_prob_5)
W_c31 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
b_c31 = tf.Variable(tf.random_normal([128]))
conv31 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3, W_c31, strides=[1, 1, 1, 1], padding='SAME'), b_c31))
conv31 = tf.nn.max_pool(conv31, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv31 = tf.nn.dropout(conv31, keep_prob_5)
W_c32 = tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01))
b_c32 = tf.Variable(tf.random_normal([128]))
conv32 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv31, W_c32, strides=[1, 1, 1, 1], padding='SAME'), b_c32))
conv32 = tf.nn.max_pool(conv32, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv32 = tf.nn.dropout(conv32, keep_prob_5)
# Fully connected layer
#W_d = tf.Variable(tf.random_normal([8*16*32, 512], stddev=0.01))
W_d = tf.Variable(tf.random_normal([128*128, 512], stddev=0.01))
b_d = tf.Variable(tf.random_normal([512]))
dense = tf.reshape(conv32, [-1, W_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, W_d), b_d))
dense = tf.nn.dropout(dense, keep_prob_75)
W_out = tf.Variable(tf.random_normal([512, 2], stddev=0.01))
b_out = tf.Variable(tf.random_normal([2]))
out = tf.add(tf.matmul(dense, W_out), b_out)
return out
#learning_rate = 0.001
def train_cnn():
output = panda_joke_cnn()
    # softmax_cross_entropy_with_logits(): first applies softmax to the network's final-layer output,
    # then computes the cross entropy between that softmax vector [Y1, Y2, Y3, ...] and the true labels.
    # tf.reduce_mean then averages it into the scalar loss we want.
    # Note that the function itself returns a per-sample vector, not a single number.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=output))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(Y, 1)), tf.float32))
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./log', graph=tf.get_default_graph())
for e in range(50):
for i in range(num_batch):
batch_x = train_x[i*batch_size : (i+1)*batch_size]
batch_y = train_y[i*batch_size : (i+1)*batch_size]
_, loss_, summary = sess.run([optimizer, loss, merged_summary_op], feed_dict={X: batch_x, Y: batch_y, keep_prob_5:0.5, keep_prob_75: 0.75})
summary_writer.add_summary(summary, e*num_batch+i)
print(e*num_batch+i, "loss= ", loss_)
if (e*num_batch+i) % 100 == 0:
acc = accuracy.eval({X: test_x, Y: test_y, keep_prob_5:1.0, keep_prob_75: 1.0})
print(e*num_batch+i,"acc= ", +acc)
# save model
if acc > 0.99:
saver.save(sess, "G:/codes/tensorflow2/WhetherOrNotMe/models/whether_orNot_me.model", global_step=e*num_batch+i)
if e*num_batch+i > 0:
sys.exit(0)
train_cnn()
output = panda_joke_cnn()
predict = tf.argmax(output, 1)
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, tf.train.latest_checkpoint('.'))
def is_my_face(image):
res = sess.run(predict, feed_dict={X: [image/255.0], keep_prob_5:1.0, keep_prob_75: 1.0})
if res[0] == 1:
return True
else:
return False
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_haar.load('D:/Program Files (x86)/Miniconda3/Library/etc/haarcascades/haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)
while True:
_, img = cam.read()
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_haar.detectMultiScale(gray_image, 1.3, 5)
for face_x,face_y,face_w,face_h in faces:
face = img[face_y:face_y+face_h, face_x:face_x+face_w]
face = cv2.resize(face, (64, 64))
print("my face:"+is_my_face(face))
cv2.imshow('img', face)
key = cv2.waitKey(30) & 0xff
if key == 27:
sys.exit(0)
sess.close() | 40.283582 | 156 | 0.632827 | [
"Apache-2.0"
] | linrio/WhetherOrNotMe | trainCNN.py | 8,629 | Python |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the Authorization Services.
Test-Suite to ensure that the Authorization Service is working as expected.
"""
from http import HTTPStatus
import pytest
from flask import jsonify
from legal_api.models.business import Business
from legal_api.services.authz import BASIC_USER, COLIN_SVC_ROLE, STAFF_ROLE, authorized, get_allowed, is_allowed
from tests import integration_authorization, not_github_ci
from .utils import helper_create_jwt
def test_jwt_manager_initialized(jwt):
"""Assert that the jwt_manager is created as part of the fixtures."""
assert jwt
@not_github_ci
def test_jwt_manager_correct_test_config(app_request, jwt):
"""Assert that the test configuration for the JWT is working as expected."""
message = 'This is a protected end-point'
protected_route = '/fake_jwt_route'
@app_request.route(protected_route)
@jwt.has_one_of_roles([STAFF_ROLE])
def get():
return jsonify(message=message)
# assert that JWT is setup correctly for a known role
token = helper_create_jwt(jwt, [STAFF_ROLE])
headers = {'Authorization': 'Bearer ' + token}
rv = app_request.test_client().get(protected_route, headers=headers)
assert rv.status_code == HTTPStatus.OK
# assert the JWT fails for an unknown role
token = helper_create_jwt(jwt, ['SHOULD-FAIL'])
headers = {'Authorization': 'Bearer ' + token}
rv = app_request.test_client().get(protected_route, headers=headers)
assert rv.status_code == HTTPStatus.UNAUTHORIZED
TEST_AUTHZ_DATA = [
('staff_role', # test name
'CP1234567', # business identifier
'happy-staff', # username
[STAFF_ROLE], # roles
['view', 'edit'], # allowed actions
['edit'], # requested action
HTTPStatus.OK), # expected response
('colin svc role', 'CP1234567', 'CP1234567', [COLIN_SVC_ROLE], ['view', 'edit'], ['edit'],
HTTPStatus.OK),
('authorized_user', 'CP0001237', 'CP1234567', [BASIC_USER], ['view', 'edit'], ['edit'],
HTTPStatus.OK),
('unauthorized_user', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['edit'],
HTTPStatus.METHOD_NOT_ALLOWED),
('missing_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, None,
HTTPStatus.METHOD_NOT_ALLOWED),
('invalid_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['scrum'],
HTTPStatus.METHOD_NOT_ALLOWED),
('add_comment_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['add_comment'],
HTTPStatus.METHOD_NOT_ALLOWED),
('court_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['court_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_notation_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_notation'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
]
@not_github_ci
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected',
TEST_AUTHZ_DATA)
def test_authorized_user(monkeypatch, app_request, jwt,
test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
"""Assert that the type of user authorization is correct, based on the expected outcome."""
from requests import Response
print(test_name)
# mocks, the get and json calls for requests.Response
def mock_get(*args, **kwargs): # pylint: disable=unused-argument; mocks of library methods
resp = Response()
resp.status_code = 200
return resp
def mock_json(self, **kwargs): # pylint: disable=unused-argument; mocks of library methods
return {'roles': allowed_actions}
monkeypatch.setattr('requests.sessions.Session.get', mock_get)
monkeypatch.setattr('requests.Response.json', mock_json)
# setup
@app_request.route('/fake_jwt_route/<string:identifier>')
@jwt.requires_auth
def get_fake(identifier: str):
if not authorized(identifier, jwt, ['view']):
return jsonify(message='failed'), HTTPStatus.METHOD_NOT_ALLOWED
return jsonify(message='success'), HTTPStatus.OK
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': 'Bearer ' + token}
# test it
rv = app_request.test_client().get(f'/fake_jwt_route/{identifier}', headers=headers)
# check it
assert rv.status_code == expected
TEST_INTEG_AUTHZ_DATA = [
('staff_role', # test name
'CP1234567', # business identifier
'happy-staff', # username
[STAFF_ROLE], # roles
['view', 'edit'], # allowed actions
['edit'], # requested action
HTTPStatus.OK), # expected response
('colin svc role', 'CP1234567', 'CP1234567', [COLIN_SVC_ROLE], ['view', 'edit'], ['edit'],
HTTPStatus.OK),
('unauthorized_user', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['edit'],
HTTPStatus.METHOD_NOT_ALLOWED),
('missing_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, None,
HTTPStatus.METHOD_NOT_ALLOWED),
('invalid_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['scrum'],
HTTPStatus.METHOD_NOT_ALLOWED),
('add_comment_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['add_comment'],
HTTPStatus.METHOD_NOT_ALLOWED),
('court_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['court_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_notation_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_notation'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
]
@integration_authorization
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected',
TEST_INTEG_AUTHZ_DATA)
def test_authorized_user_integ(monkeypatch, app, jwt,
test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
"""Assert that the type of user authorization is correct, based on the expected outcome."""
import flask # noqa: F401; import actually used in mock
# setup
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': 'Bearer ' + token}
def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
return headers['Authorization']
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
rv = authorized(identifier, jwt, ['view'])
# check it
if expected == HTTPStatus.OK:
assert rv
else:
assert not rv
def test_authorized_missing_args():
"""Assert that the missing args return False."""
identifier = 'a corp'
jwt = 'fake'
action = 'fake'
rv = authorized(identifier, jwt, None)
assert not rv
rv = authorized(identifier, None, action)
assert not rv
rv = authorized(None, jwt, action)
assert not rv
def test_authorized_bad_url(monkeypatch, app, jwt):
"""Assert that an invalid auth service URL returns False."""
import flask # noqa: F401; import actually used in mock
# setup
identifier = 'CP1234567'
username = 'username'
roles = [BASIC_USER]
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': 'Bearer ' + token}
def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
return headers['Authorization']
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
auth_svc_url = app.config['AUTH_SVC_URL']
app.config['AUTH_SVC_URL'] = 'http://no.way.this.works/dribble'
rv = authorized(identifier, jwt, ['view'])
app.config['AUTH_SVC_URL'] = auth_svc_url
assert not rv
def test_authorized_invalid_roles(monkeypatch, app, jwt):
"""Assert that an invalid role returns False."""
import flask # noqa: F401 ; import actually used in mock
# setup noqa: I003
identifier = 'CP1234567'
username = 'username'
roles = ['NONE']
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': 'Bearer ' + token}
def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
return headers['Authorization']
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
rv = authorized(identifier, jwt, ['view'])
assert not rv
@pytest.mark.parametrize(
'test_name,state,legal_type,username,roles,expected',
[
# active business
('staff_active_cp', Business.State.ACTIVE, 'CP', 'staff', [STAFF_ROLE],
['annualReport', 'changeOfAddress', 'changeOfDirectors', 'correction',
'courtOrder', 'dissolution', 'incorporationApplication',
'specialResolution', 'registrarsNotation', 'registrarsOrder']),
('staff_active_bc', Business.State.ACTIVE, 'BC', 'staff', [STAFF_ROLE],
['alteration', 'courtOrder', 'dissolution', 'incorporationApplication',
'transition', 'registrarsNotation', 'registrarsOrder']),
('staff_active_ben', Business.State.ACTIVE, 'BEN', 'staff', [STAFF_ROLE],
['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'conversion', 'correction',
'courtOrder', 'dissolution', 'incorporationApplication',
'transition', 'registrarsNotation', 'registrarsOrder']),
('staff_active_cc', Business.State.ACTIVE, 'CC', 'staff', [STAFF_ROLE],
['courtOrder', 'dissolution',
'registrarsNotation', 'registrarsOrder']),
('staff_active_ulc', Business.State.ACTIVE, 'ULC', 'staff', [STAFF_ROLE],
['alteration', 'courtOrder', 'dissolution',
'registrarsNotation', 'registrarsOrder']),
('staff_active_llc', Business.State.ACTIVE, 'LLC', 'staff', [STAFF_ROLE],
['courtOrder', 'dissolution',
'registrarsNotation', 'registrarsOrder']),
('staff_active_sp', Business.State.ACTIVE, 'SP', 'staff', [STAFF_ROLE],
['changeOfRegistration', 'conversion', 'dissolution', 'registration']),
('staff_active_gp', Business.State.ACTIVE, 'GP', 'staff', [STAFF_ROLE],
['changeOfRegistration', 'conversion', 'dissolution', 'registration']),
('user_active_cp', Business.State.ACTIVE, 'CP', 'user', [BASIC_USER],
['annualReport', 'changeOfAddress', 'changeOfDirectors',
'dissolution', 'incorporationApplication', 'specialResolution']),
('user_active_bc', Business.State.ACTIVE, 'BC', 'user', [BASIC_USER],
['alteration', 'dissolution', 'incorporationApplication', 'transition']),
('user_active_ben', Business.State.ACTIVE, 'BEN', 'user', [BASIC_USER],
['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors',
'dissolution', 'incorporationApplication', 'transition']),
('user_active_cc', Business.State.ACTIVE, 'CC', 'user', [BASIC_USER], ['dissolution']),
('user_active_ulc', Business.State.ACTIVE, 'ULC', 'user', [BASIC_USER], ['alteration', 'dissolution']),
('user_active_llc', Business.State.ACTIVE, 'LLC', 'user', [BASIC_USER], ['dissolution']),
('user_active_sp', Business.State.ACTIVE, 'SP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']),
('user_active_gp', Business.State.ACTIVE, 'GP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']),
# historical business
('staff_historical_cp', Business.State.HISTORICAL, 'CP', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration']}]),
('staff_historical_bc', Business.State.HISTORICAL, 'BC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_ben', Business.State.HISTORICAL, 'BEN', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_cc', Business.State.HISTORICAL, 'CC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_ulc', Business.State.HISTORICAL, 'ULC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_llc', Business.State.HISTORICAL, 'LLC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('user_historical_llc', Business.State.HISTORICAL, 'LLC', 'user', [BASIC_USER], []),
]
)
def test_get_allowed(monkeypatch, app, jwt, test_name, state, legal_type, username, roles, expected):
"""Assert that get allowed returns valid filings."""
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': 'Bearer ' + token}
def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
return headers[one]
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
filing_types = get_allowed(state, legal_type, jwt)
assert filing_types == expected
@pytest.mark.parametrize(
'test_name,state,filing_type,sub_filing_type,legal_types,username,roles,expected',
[
# active business
('staff_active_allowed', Business.State.ACTIVE, 'alteration', None,
['BC', 'BEN', 'ULC'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'alteration', None,
['CP', 'CC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'annualReport', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'annualReport', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'changeOfAddress', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'changeOfDirectors', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'correction', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'correction', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None,
['CP', 'BC', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'specialResolution', None,
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'transition', None,
['BC', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'transition', None,
['CP', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'registration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], True),
('user_active_allowed', Business.State.ACTIVE, 'alteration', None,
['BC', 'BEN', 'ULC'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'alteration', None,
['CP', 'CC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'annualReport', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None,
['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'changeOfAddress', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None,
['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'changeOfDirectors', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None,
['CP', 'BC', 'BEN'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'registration', None,
['SP', 'GP'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None,
['SP', 'GP'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'specialResolution', None,
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'transition', None,
['CP', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
# historical business
('staff_historical', Business.State.HISTORICAL, 'alteration', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'annualReport', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfAddress', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfDirectors', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'incorporationApplication', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['CP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'specialResolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'transition', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'registration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfRegistration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], False),
('user_historical', Business.State.HISTORICAL, 'alteration', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'annualReport', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfAddress', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfDirectors', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'dissolution', None,
         ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'incorporationApplication', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'specialResolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'transition', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registration', None,
['SP', 'GP'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfRegistration', None,
['SP', 'GP'], 'user', [BASIC_USER], False),
]
)
def test_is_allowed(monkeypatch, app, jwt, test_name, state, filing_type, sub_filing_type,
legal_types, username, roles, expected):
"""Assert that get allowed returns valid filings."""
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': 'Bearer ' + token}
def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
return headers[one]
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
for legal_type in legal_types:
filing_types = is_allowed(state, filing_type, legal_type, jwt, sub_filing_type)
assert filing_types == expected
| 48.531306 | 135 | 0.635925 | [
"Apache-2.0"
] | leksmall/lear | legal-api/tests/unit/services/test_authorization.py | 27,130 | Python |
""" Tagger using mecab-service """
import traceback
import requests
from ..models import WordNode
from .base import Tagger
class MeCabServiceNode(WordNode):
"""
Parsed word node by MeCabServiceTagger
Attributes
----------
surface : str
Surface of word
part : str
Part of the word
part_detail1 : str
Detail1 of part
part_detail2 : str
Detail2 of part
part_detail3 : str
Detail3 of part
stem_type : str
Stem type
stem_form : str
Stem form
word : str
Word itself
kana : str
Japanese kana of the word
pronunciation : str
Pronunciation of the word
"""
@classmethod
def create(cls, surface, features):
"""
Create instance of MeCabServiceNode
Parameters
----------
surface : str
Surface of the word
features : dict
Features analyzed by MeCabService
"""
return cls(
surface=surface,
part=features["part"],
part_detail1=features["part_detail1"],
part_detail2=features["part_detail2"],
part_detail3=features["part_detail3"],
stem_type=features["stem_type"],
stem_form=features["stem_form"],
word=features["word"],
kana=features["kana"],
pronunciation=features["pronunciation"]
)
class MeCabServiceTagger(Tagger):
"""
Tagger using mecab-service
Attributes
----------
config : minette.Config
Configuration
timezone : pytz.timezone
Timezone
logger : logging.Logger
Logger
api_url : str
URL for MeCabService API
"""
def __init__(self, config=None, timezone=None, logger=None, *,
api_url=None, **kwargs):
"""
Parameters
----------
config : Config, default None
Configuration
timezone : timezone, default None
Timezone
logger : Logger, default None
Logger
api_url : str, default None
URL for MeCabService API.
            If None, the trial URL is used.
"""
super().__init__(config=config, timezone=timezone, logger=logger)
if not api_url:
self.api_url = "https://api.uezo.net/mecab/parse"
self.logger.warning(
"Do not use default API URL for the production environment. "
"This is for trial use only. "
"Install MeCab and use MeCabTagger instead.")
else:
self.api_url = api_url
def parse(self, text):
"""
Parse and annotate using MeCab Service
Parameters
----------
text : str
Text to analyze
Returns
-------
words : list of minette.MeCabServiceNode
MeCabService nodes
"""
ret = []
if not text:
return ret
try:
parsed_json = requests.post(
self.api_url, headers={"content-type": "application/json"},
json={"text": text}, timeout=10).json()
ret = [MeCabServiceNode.create(
n["surface"], n["features"]) for n in parsed_json["nodes"]]
except Exception as ex:
self.logger.error(
"MeCab Service parsing error: "
+ str(ex) + "\n" + traceback.format_exc())
return ret
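

# A minimal usage sketch, not part of the original module: parse a Japanese
# sentence against the trial API. It assumes the base Tagger accepts the default
# None config/timezone/logger arguments and that network access is available.
if __name__ == "__main__":
    tagger = MeCabServiceTagger()  # logs a warning that the trial URL is in use
    for node in tagger.parse("今日はいい天気です"):  # "The weather is nice today."
        # Each node is a MeCabServiceNode carrying surface/part/kana attributes.
        print(node.surface, node.part, node.kana)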
| 26.477273 | 77 | 0.5402 | [
"Apache-2.0"
] | uezo/minette-python | minette/tagger/mecabservice.py | 3,495 | Python |
version = '2.0'
import mylib
print 'self import in 2.0:', mylib
| 13 | 34 | 0.676923 | [
"BSD-3-Clause"
] | mitsuhiko/multiversion | example/mylib-2.0/mylib.py | 65 | Python |
from setuptools import setup, find_packages
import os.path
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with open(os.path.join(HERE, *parts)) as f:
return f.read()
setup(
name="doc484",
version="0.3.4",
author="Chad Dombrova",
description="Generate PEP 484 type comments from docstrings",
long_description=read("README.rst"),
license="MIT",
keywords=["mypy", "typing", "pep484", "docstrings", "annotations"],
url="https://github.com/chadrik/doc484",
packages=find_packages(),
entry_points={
'console_scripts': ['doc484=doc484.__main__:main'],
},
install_requires=[
"docutils", # only required for rest format
],
extras_require={
"tests": [
"coverage",
"pytest==3.6.2",
"tox==2.7.0",
],
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| 29.827586 | 77 | 0.590173 | [
"MIT"
] | chadrik/doc484 | setup.py | 1,730 | Python |
from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __unicode__(self): return self.name
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __unicode__(self): return self.name
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class Truth(models.Model):
val = models.BooleanField()
objects = models.GeoManager()
if not spatialite:
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
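

# An illustrative helper, not part of the original test models: a typical spatial
# lookup that these GeoManager-backed models support. This is only a sketch (it
# assumes a populated test database) and is never executed at import time.
def cities_in_country(country_name):
    """Return the cities whose point lies within the named country's multipolygon."""
    country = Country.objects.get(name=country_name)
    return City.objects.filter(point__within=country.mpoly)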
| 33.058824 | 79 | 0.716489 | [
"Apache-2.0"
] | AppScale/appscale | AppServer/lib/django-1.4/django/contrib/gis/tests/geoapp/models.py | 1,686 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceMetricName(Model):
"""Name of a metric for any resource .
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: metric name value.
:vartype value: str
:ivar localized_value: Localized metric name value.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(self):
self.value = None
self.localized_value = None
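

# Illustrative note, not part of the generated SDK: both attributes are declared
# read-only, so they are populated when the model is deserialized from a service
# response rather than set by callers. Constructing the model directly yields Nones.
if __name__ == "__main__":
    metric_name = ResourceMetricName()
    print(metric_name.value, metric_name.localized_value)  # None None until filled by the server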
| 29.85 | 76 | 0.579564 | [
"MIT"
] | AutorestCI/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/resource_metric_name.py | 1,194 | Python |
"""A CSV annotation writer that writes the bbox in x, y, w, h format."""
from .types import CSVAnnotationWriter
class WidthHeightCSV(CSVAnnotationWriter):
"""Writes annotations to a CSV file in the following format.
image_name, x_min, y_min, width, height, label
"""
def get_csv_fieldnames(self):
"""Return the field names for the CSV file."""
return ["image_name", "x_min", "y_min", "width", "height", "label"]
def get_csv_row(self, image_name, _image, annotation):
"""Return the CSV row corresponding to the given annotation."""
return {
"image_name": image_name,
"x_min": annotation.x_min,
"y_min": annotation.y_min,
"width": annotation.x_max - annotation.x_min,
"height": annotation.y_max - annotation.y_min,
"label": annotation.class_idx
}
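

# A minimal sketch, not part of the module, of the x/y/width/height conversion that
# get_csv_row performs. The method does not touch instance state, so it is called
# unbound with None for self; the annotation is a stand-in object exposing the
# attributes the writer reads (x_min, y_min, x_max, y_max, class_idx).
if __name__ == "__main__":
    from types import SimpleNamespace

    annotation = SimpleNamespace(x_min=10, y_min=20, x_max=110, y_max=220, class_idx=3)
    row = WidthHeightCSV.get_csv_row(None, "example.jpg", None, annotation)
    print(row)  # width == 100, height == 200, label == 3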
| 33.961538 | 75 | 0.629672 | [
"MIT"
] | arunraja-hub/discolight | src/discolight/writers/annotation/widthheightcsv.py | 883 | Python |
import logging
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, TemplateView
from django.views.generic.base import View as BaseView
from rdmo.core.imports import handle_uploaded_file
from rdmo.core.plugins import get_plugin, get_plugins
from rdmo.core.views import RedirectViewMixin
from rdmo.questions.models import Catalog
from rdmo.tasks.models import Task
from rdmo.views.models import View
from ..forms import ProjectForm
from ..models import Membership, Project
from ..utils import (save_import_snapshot_values, save_import_tasks,
save_import_values, save_import_views)
logger = logging.getLogger(__name__)
class ProjectCreateView(LoginRequiredMixin, RedirectViewMixin, CreateView):
model = Project
form_class = ProjectForm
def get_form_kwargs(self):
catalogs = Catalog.objects.filter_current_site() \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
form_kwargs = super().get_form_kwargs()
form_kwargs.update({
'catalogs': catalogs
})
return form_kwargs
def form_valid(self, form):
# add current site
form.instance.site = get_current_site(self.request)
# save the project
response = super(ProjectCreateView, self).form_valid(form)
# add all tasks to project
tasks = Task.objects.filter_current_site() \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
for task in tasks:
form.instance.tasks.add(task)
# add all views to project
views = View.objects.filter_current_site() \
.filter_catalog(self.object.catalog) \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
for view in views:
form.instance.views.add(view)
# add current user as owner
membership = Membership(project=form.instance, user=self.request.user, role='owner')
membership.save()
return response
class ProjectCreateUploadView(LoginRequiredMixin, BaseView):
success_url = reverse_lazy('projects')
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.success_url)
def post(self, request, *args, **kwargs):
try:
uploaded_file = request.FILES['uploaded_file']
except KeyError:
return HttpResponseRedirect(self.success_url)
else:
import_tmpfile_name = handle_uploaded_file(uploaded_file)
for import_key, import_plugin in get_plugins('PROJECT_IMPORTS').items():
import_plugin.file_name = import_tmpfile_name
if import_plugin.check():
try:
import_plugin.process()
except ValidationError as e:
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': e
}, status=400)
# store information in session for ProjectCreateImportView
request.session['create_import_tmpfile_name'] = import_tmpfile_name
request.session['create_import_key'] = import_key
return render(request, 'projects/project_upload.html', {
'create': True,
'file_name': uploaded_file.name,
'project': import_plugin.project,
'values': import_plugin.values,
'snapshots': import_plugin.snapshots,
'tasks': import_plugin.tasks,
'views': import_plugin.views
})
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': [_('Files of this type cannot be imported.')]
}, status=400)
class ProjectCreateImportView(LoginRequiredMixin, TemplateView):
success_url = reverse_lazy('projects')
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.success_url)
def post(self, request, *args, **kwargs):
import_tmpfile_name = request.session.get('create_import_tmpfile_name')
import_key = request.session.get('create_import_key')
checked = [key for key, value in request.POST.items() if 'on' in value]
if import_tmpfile_name and import_key:
import_plugin = get_plugin('PROJECT_IMPORTS', import_key)
import_plugin.file_name = import_tmpfile_name
if import_plugin.check():
try:
import_plugin.process()
except ValidationError as e:
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': e
}, status=400)
# add current site and save project
import_plugin.project.site = get_current_site(self.request)
import_plugin.project.save()
# add user to project
membership = Membership(project=import_plugin.project, user=request.user, role='owner')
membership.save()
save_import_values(import_plugin.project, import_plugin.values, checked)
save_import_snapshot_values(import_plugin.project, import_plugin.snapshots, checked)
save_import_tasks(import_plugin.project, import_plugin.tasks)
save_import_views(import_plugin.project, import_plugin.views)
return HttpResponseRedirect(import_plugin.project.get_absolute_url())
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': [_('There has been an error with your import.')]
}, status=400)
| 39.217391 | 103 | 0.625436 | [
"Apache-2.0"
] | cbittner/rdmo | rdmo/projects/views/project_create.py | 6,314 | Python |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import os
import urllib2
import argparse
import sys
searchterm = str(sys.argv[1]) # will also be the name of the folder
url = "https://www.google.co.in/search?q="+searchterm+"&source=lnms&tbm=isch"
browser = webdriver.Chrome()
browser.get(url)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
counter = 0
succounter = 0
if not os.path.exists(searchterm):
os.mkdir(searchterm)
for _ in range(500):
browser.execute_script("window.scrollBy(0,10000)")
for x in browser.find_elements_by_xpath('//div[contains(@class,"rg_meta")]'):
counter = counter + 1
    print "Total Count:", counter
    print "Successful Count:", succounter
    metadata = json.loads(x.get_attribute('innerHTML'))
    img = metadata["ou"]
    imgtype = metadata["ity"]
    print "URL:", img
    # Skip results hosted on these retailer domains.
    if "mrporter" not in img and "images.asos" not in img and "famousfootwear" not in img:
        try:
            # 'header' already holds the full headers dict, so pass it through as-is.
            req = urllib2.Request(img, headers=header)
raw_img = urllib2.urlopen(req).read()
File = open(os.path.join(searchterm , searchterm + "_" + str(counter) + "." + imgtype), "wb")
File.write(raw_img)
File.close()
succounter = succounter + 1
except:
print "can't get img"
print succounter, "pictures successfully downloaded"
browser.close()
| 39.302326 | 225 | 0.671006 | [
"MIT"
] | danscime/Tip-Of-My-Shoe | Model/imagescrape.py | 1,690 | Python |
# Object tracking with keypoints example.
# Show the camera an object and then run the script. A set of keypoints will be extracted
# once and then tracked in the following frames. If you want a new set of keypoints re-run
# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
import sensor, time, image
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False, value=100)
def draw_keypoints(img, kpts):
print(kpts)
img.draw_keypoints(kpts)
img = sensor.snapshot()
time.sleep(1000)
kpts1 = None
# NOTE: uncomment to load a keypoints descriptor from file
#kpts1 = image.load_descriptor("/desc.orb")
#img = sensor.snapshot()
#draw_keypoints(img, kpts1)
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
if (kpts1 == None):
# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
draw_keypoints(img, kpts1)
else:
# NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
# keypoints from the first scale only, which will match one of the scales in the first descriptor.
kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
if (kpts2):
match = image.match_descriptor(kpts1, kpts2, threshold=85)
if (match.count()>10):
# If we have at least n "good matches"
# Draw bounding rectangle and cross.
img.draw_rectangle(match.rect())
img.draw_cross(match.cx(), match.cy(), size=10)
print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
# NOTE: uncomment if you want to draw the keypoints
#img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
| 37.655172 | 106 | 0.687729 | [
"MIT"
] | Jack19960208/openmv | usr/examples/09-Feature-Detection/keypoints.py | 2,184 | Python |
import os
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
test_config_dir = os.path.join(test_data_dir, 'config-test')
graph_data_dir = os.path.join(test_data_dir, 'graph_data')
default_worker = {"platform": 'linux',
'arch': '64',
'label': 'linux',
'pool_name': 'linux_pool'}
def make_recipe(name, dependencies=()):
os.makedirs(name)
with open(os.path.join(name, 'meta.yaml'), 'w') as f:
# not valid meta.yaml. Doesn't matter for test.
f.write('package:\n')
f.write(' name: {0}\n'.format(name))
f.write(' version: 1.0\n')
if dependencies:
f.write('requirements:\n')
f.write(' build:\n')
for dep in dependencies:
f.write(' - {0}\n'.format(dep))
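

# A short usage sketch, not part of the original helpers: make_recipe writes a stub
# meta.yaml into a new directory named after the recipe and can list build
# dependencies. Run from a scratch directory so os.makedirs does not collide.
if __name__ == "__main__":
    import tempfile

    os.chdir(tempfile.mkdtemp())
    make_recipe("pkg_a")
    make_recipe("pkg_b", dependencies=["pkg_a"])
    with open(os.path.join("pkg_b", "meta.yaml")) as f:
        print(f.read())  # 'pkg_a' appears under requirements/build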
| 33.32 | 63 | 0.552221 | [
"BSD-3-Clause"
] | AnjuSThomas-anaconda/conda-concourse-ci | tests/utils.py | 833 | Python |
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union
import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName
class Cluster(Entity):
MINIMUM_NODES_TO_WAIT = 1
EVENTS_THRESHOLD = 500 # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto
_config: BaseClusterConfig
def __init__(
self,
api_client: InventoryClient,
config: BaseClusterConfig,
infra_env_config: BaseInfraEnvConfig,
nodes: Optional[Nodes] = None,
):
super().__init__(api_client, config, nodes)
self._infra_env_config = infra_env_config
self._infra_env = None
# Update infraEnv configurations
self._infra_env_config.cluster_id = config.cluster_id
self._infra_env_config.openshift_version = self._config.openshift_version
self._infra_env_config.pull_secret = self._config.pull_secret
self._high_availability_mode = config.high_availability_mode
self.name = config.cluster_name.get()
@property
def kubeconfig_path(self):
return self._config.kubeconfig_path
@property
def iso_download_path(self):
return self._config.iso_download_path
@property
def enable_image_download(self):
return self._config.download_image
def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)
self.update_config(
**dict(
openshift_version=day2_cluster.openshift_version,
cluster_name=ClusterName(day2_cluster.name),
additional_ntp_source=day2_cluster.additional_ntp_source,
user_managed_networking=day2_cluster.user_managed_networking,
high_availability_mode=day2_cluster.high_availability_mode,
olm_operators=day2_cluster.monitored_operators,
base_dns_domain=day2_cluster.base_dns_domain,
vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
)
)
def _create(self) -> str:
if self._config.cluster_id:
log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
self._update_day2_config(self.api_client, self._config.cluster_id)
return self._config.cluster_id
cluster = self.api_client.create_cluster(
self._config.cluster_name.get(),
ssh_public_key=self._config.ssh_public_key,
openshift_version=self._config.openshift_version,
pull_secret=self._config.pull_secret,
base_dns_domain=self._config.base_dns_domain,
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
additional_ntp_source=self._config.additional_ntp_source,
user_managed_networking=self._config.user_managed_networking,
high_availability_mode=self._config.high_availability_mode,
olm_operators=[{"name": name} for name in self._config.olm_operators],
network_type=self._config.network_type,
)
self._config.cluster_id = cluster.id
return cluster.id
def delete(self):
self.api_client.delete_cluster(self.id)
def get_details(self):
return self.api_client.cluster_get(self.id)
def get_cluster_name(self):
return self.get_details().name
def get_hosts(self):
return self.api_client.get_cluster_hosts(self.id)
def get_host_ids(self):
return [host["id"] for host in self.get_hosts()]
def get_host_ids_names_mapping(self):
return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}
def get_host_assigned_roles(self):
hosts = self.get_hosts()
return {h["id"]: h["role"] for h in hosts}
def get_operators(self):
return self.api_client.get_cluster_operators(self.id)
# TODO remove in favor of generate_infra_env
def generate_image(self):
warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)
def generate_infra_env(
self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
) -> InfraEnv:
self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
self._infra_env_config.static_network_config = static_network_config
self._infra_env_config.ignition_config_override = ignition_info
self._infra_env_config.proxy = proxy or self._config.proxy
infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
self._infra_env = infra_env
return infra_env
def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
self._infra_env_config.proxy = proxy
self._infra_env.update_proxy(proxy=proxy)
def download_infra_env_image(self, iso_download_path=None) -> Path:
iso_download_path = iso_download_path or self._config.iso_download_path
return self._infra_env.download_image(iso_download_path=iso_download_path)
@JunitTestCase()
def generate_and_download_infra_env(
self,
iso_download_path=None,
static_network_config=None,
iso_image_type=None,
ssh_key=None,
ignition_info=None,
proxy=None,
) -> Path:
if self._config.is_static_ip and static_network_config is None:
static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)
self.generate_infra_env(
static_network_config=static_network_config,
iso_image_type=iso_image_type,
ssh_key=ssh_key,
ignition_info=ignition_info,
proxy=proxy,
)
return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)
@JunitTestCase()
def generate_and_download_image(
self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
):
warnings.warn(
"generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
DeprecationWarning,
)
iso_download_path = iso_download_path or self._config.iso_download_path
# ensure file path exists before downloading
if not os.path.exists(iso_download_path):
utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)
self.api_client.generate_and_download_image(
cluster_id=self.id,
ssh_key=ssh_key or self._config.ssh_public_key,
image_path=iso_download_path,
image_type=iso_image_type or self._config.iso_image_type,
static_network_config=static_network_config,
)
def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
statuses = [consts.NodesStatus.DISCONNECTED]
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.DISCONNECTED_TIMEOUT,
)
@JunitTestCase()
def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
if allow_insufficient:
statuses.append(consts.NodesStatus.INSUFFICIENT)
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.NODES_REGISTERED_TIMEOUT,
)
def _get_matching_hosts(self, host_type, count):
hosts = self.get_hosts()
return [{"id": h["id"], "role": host_type} for h in hosts if host_type in h["requested_hostname"]][:count]
def set_cluster_name(self, cluster_name: str):
log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
self.api_client.update_cluster(self.id, {"name": cluster_name})
def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)
def set_ocs(self, properties=None):
self.set_olm_operator(consts.OperatorType.OCS, properties=properties)
def set_cnv(self, properties=None):
self.set_olm_operator(consts.OperatorType.CNV, properties=properties)
def unset_ocs(self):
self.unset_olm_operator(consts.OperatorType.OCS)
def unset_cnv(self):
self.unset_olm_operator(consts.OperatorType.CNV)
def unset_olm_operator(self, operator_name):
log.info(f"Unsetting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
olm_operators = []
for operator in cluster.monitored_operators:
if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_olm_operator(self, operator_name, properties=None):
log.info(f"Setting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
if operator_name in [o.name for o in cluster.monitored_operators]:
return
olm_operators = []
for operator in cluster.monitored_operators:
if operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
olm_operators.append({"name": operator_name, "properties": properties})
self._config.olm_operators = olm_operators
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
if requested_roles is None:
requested_roles = Counter(
master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
)
assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
assigned_roles.extend(
self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
)
for role in assigned_roles:
self._infra_env.update_host(host_id=role["id"], host_role=role["role"])
return assigned_roles
def set_specific_host_role(self, host, role):
self._infra_env.update_host(host_id=host["id"], host_role=role)
def set_network_params(self, controller=None):
        # The controller argument is here only for backward compatibility. TODO: remove after QE refactors all e2e tests
        controller = controller or self.nodes.controller  # TODO: remove after QE refactors all e2e tests
if self._config.platform == consts.Platforms.NONE:
log.info("On None platform, leaving network management to the user")
api_vip = ingress_vip = machine_networks = None
elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
            log.info("Letting access VIPs be deduced from machine networks")
api_vip = ingress_vip = None
machine_networks = self.get_machine_networks()
else:
log.info("Assigning VIPs statically")
access_vips = controller.get_ingress_and_api_vips()
api_vip = access_vips["api_vip"]
ingress_vip = access_vips["ingress_vip"]
machine_networks = None
self.set_advanced_networking(
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
cluster_networks=self._config.cluster_networks,
service_networks=self._config.service_networks,
machine_networks=machine_networks,
api_vip=api_vip,
ingress_vip=ingress_vip,
)
        # TODO: when assisted-service supports configuring dual-stack networks in one go,
# change it so that we call set_advanced_networking only once
if self._config.is_ipv4 and self._config.is_ipv6:
machine_networks = controller.get_all_machine_addresses()
self.set_advanced_networking(machine_networks=machine_networks)
def get_primary_machine_cidr(self):
cidr = self.nodes.controller.get_primary_machine_cidr()
if not cidr:
            # Support controllers for which the machine cidr is not configurable; take it from the AI instead
matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not matching_cidrs:
raise RuntimeError("No matching cidr for DHCP")
cidr = next(iter(matching_cidrs))
return cidr
def get_machine_networks(self):
networks = []
primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
if primary_machine_cidr:
networks.append(primary_machine_cidr)
secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
if secondary_machine_cidr:
networks.append(secondary_machine_cidr)
if not networks:
            # Support controllers for which the machine cidr is not configurable; take it from the AI instead
networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not networks:
raise RuntimeError("No matching cidr for DHCP")
return networks
def set_ingress_and_api_vips(self, vips):
log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
self.api_client.update_cluster(self.id, vips)
def set_ssh_key(self, ssh_key: str):
log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
self.update_config(ssh_public_key=ssh_key)
self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})
def set_base_dns_domain(self, base_dns_domain: str):
log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
self.update_config(base_dns_domain=base_dns_domain)
self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})
def set_advanced_networking(
self,
vip_dhcp_allocation: Optional[bool] = None,
cluster_networks: Optional[List[models.ClusterNetwork]] = None,
service_networks: Optional[List[models.ServiceNetwork]] = None,
machine_networks: Optional[List[models.MachineNetwork]] = None,
api_vip: Optional[str] = None,
ingress_vip: Optional[str] = None,
):
if machine_networks is None:
machine_networks = self._config.machine_networks
else:
machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]
if vip_dhcp_allocation is None:
vip_dhcp_allocation = self._config.vip_dhcp_allocation
advanced_networking = {
"vip_dhcp_allocation": vip_dhcp_allocation,
"cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
"service_networks": service_networks if service_networks is not None else self._config.service_networks,
"machine_networks": machine_networks,
"api_vip": api_vip if api_vip is not None else self._config.api_vip,
"ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
}
log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")
self.update_config(**advanced_networking)
self.api_client.update_cluster(self.id, advanced_networking)
def set_pull_secret(self, pull_secret: str):
log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
self.update_config(pull_secret=pull_secret)
self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})
def set_host_name(self, host_id, requested_name):
log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
self._infra_env.update_host(host_id=host_id, host_name=requested_name)
def set_additional_ntp_source(self, ntp_source: List[str]):
log.info(f"Setting Additional NTP source:{ntp_source}")
if isinstance(ntp_source, List):
ntp_source_string = ",".join(ntp_source)
elif isinstance(ntp_source, str):
ntp_source_string = ntp_source
else:
raise TypeError(
f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
)
self.update_config(additional_ntp_source=ntp_source_string)
self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})
def patch_discovery_ignition(self, ignition):
self._infra_env.patch_discovery_ignition(ignition_info=ignition)
def set_proxy_values(self, proxy_values: models.Proxy) -> None:
log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
self.update_config(proxy=proxy_values)
self.api_client.set_cluster_proxy(
self.id,
http_proxy=self._config.proxy.http_proxy,
https_proxy=self._config.proxy.https_proxy,
no_proxy=self._config.proxy.no_proxy,
)
@JunitTestCase()
def start_install(self):
self.api_client.install_cluster(cluster_id=self.id)
def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
logs_utils.wait_for_logs_complete(
client=self.api_client,
cluster_id=self.id,
timeout=timeout,
interval=interval,
check_host_logs_only=check_host_logs_only,
)
def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
nodes_count=nodes_count,
timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
)
def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
)
def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_specific_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
statuses=statuses,
nodes_count=nodes_count,
)
def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
test_infra.utils.waiting.wait_till_specific_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
)
def wait_for_cluster_in_error_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.ERROR],
timeout=consts.ERROR_TIMEOUT,
)
def wait_for_pending_for_input_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.CONFIGURING],
nodes_count=num_masters - 1,
)
def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.JOINED],
nodes_count=num_masters - 1,
)
def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
nodes_count=self.nodes.nodes_count,
)
@JunitTestCase()
def start_install_and_wait_for_installed(
self,
wait_for_hosts=True,
wait_for_operators=True,
wait_for_cluster_install=True,
download_kubeconfig=True,
):
self.start_install()
if wait_for_hosts:
self.wait_for_hosts_to_install()
if wait_for_operators:
self.wait_for_operators_to_finish()
if wait_for_cluster_install:
self.wait_for_install()
if download_kubeconfig:
self.download_kubeconfig()
def disable_worker_hosts(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER)
for host in hosts:
self.disable_host(host)
def disable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
self._infra_env.unbind_host(host_id=host["id"])
def enable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)
def delete_host(self, host):
host_id = host["id"]
log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
self._infra_env.delete_host(host_id=host_id)
def cancel_install(self):
self.api_client.cancel_cluster_install(cluster_id=self.id)
def get_bootstrap_hostname(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER)
for host in hosts:
if host.get("bootstrap"):
log.info("Bootstrap node is: %s", host["requested_hostname"])
return host["requested_hostname"]
def get_hosts_by_role(self, role, hosts=None):
hosts = hosts or self.api_client.get_cluster_hosts(self.id)
nodes_by_role = []
for host in hosts:
if host["role"] == role:
nodes_by_role.append(host)
        log.info(f"Found hosts: {nodes_by_role} that have the role: {role}")
return nodes_by_role
def get_random_host_by_role(self, role):
return random.choice(self.get_hosts_by_role(role))
def get_reboot_required_hosts(self):
return self.api_client.get_hosts_in_statuses(
cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
)
def reboot_required_nodes_into_iso_after_reset(self):
hosts_to_reboot = self.get_reboot_required_hosts()
self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")
def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_hosts_to_be_in_wrong_boot_order(
self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
):
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
nodes_count=nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_ready_to_install(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
        # This code was added due to BZ:1909997; temporarily checking whether it helps prevent an unexpected failure
time.sleep(10)
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
def is_in_cancelled_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
)
def is_in_error(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
)
def is_finalizing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
)
def is_installing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
)
def reset_install(self):
self.api_client.reset_cluster_install(cluster_id=self.id)
def is_in_insufficient_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
)
def wait_for_hosts_to_install(
self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
):
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
nodes_count=nodes_count or self.nodes.nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
operators = self.get_operators()
if fall_on_error_status:
statuses = [consts.OperatorStatus.AVAILABLE]
else:
statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
operator_types=[OperatorType.BUILTIN],
statuses=statuses,
timeout=timeout,
fall_on_error_status=False,
)
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)),
operator_types=[OperatorType.OLM],
statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED],
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def is_operator_in_status(self, operator_name, status):
return operators_utils.is_operator_in_status(
operators=self.get_operators(), operator_name=operator_name, status=status
)
def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
timeout=timeout,
)
def _set_hostnames_and_roles(self):
cluster_id = self.id
hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id))
nodes = self.nodes.get_nodes(refresh=True)
for host in hosts:
if host.has_hostname():
continue
name = self.find_matching_node_name(host, nodes)
assert name is not None, (
f"Failed to find matching node for host with mac address {host.macs()}"
f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}"
)
if self.nodes.nodes_count == 1:
role = None
else:
role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER
self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name)
def _ha_not_none(self):
return (
self._high_availability_mode != consts.HighAvailabilityMode.NONE
and self._config.platform != consts.Platforms.NONE
)
def download_image(self, iso_download_path: str = None) -> Path:
if self._infra_env is None:
log.warning("No infra_env found. Generating infra_env and downloading ISO")
return self.generate_and_download_infra_env(
iso_download_path=iso_download_path or self._config.iso_download_path,
iso_image_type=self._config.iso_image_type,
)
return self._infra_env.download_image(iso_download_path)
@JunitTestCase()
def prepare_for_installation(self, **kwargs):
super(Cluster, self).prepare_for_installation(**kwargs)
self.nodes.wait_for_networking()
self._set_hostnames_and_roles()
if self._high_availability_mode != consts.HighAvailabilityMode.NONE:
self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers()))
self.set_network_params(controller=self.nodes.controller)
# in case of None platform we need to specify dns records before hosts are ready
if self._config.platform == consts.Platforms.NONE:
self._configure_load_balancer()
self.nodes.controller.set_dns_for_user_managed_network()
elif self._high_availability_mode == consts.HighAvailabilityMode.NONE:
main_cidr = self.get_primary_machine_cidr()
ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr)
self.nodes.controller.set_single_node_ip(ip)
self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip)
self.wait_for_ready_to_install()
        # in the case of a regular cluster, DNS needs to be set only after the VIPs exist
        # in our case, when the nodes are ready, the VIPs will be there for sure
if self._ha_not_none():
vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id)
self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])
def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_kubeconfig(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_installation_logs(self, cluster_tar_path):
self.api_client.download_cluster_logs(self.id, cluster_tar_path)
def get_install_config(self):
return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
def get_admin_credentials(self):
return self.api_client.get_cluster_admin_credentials(self.id)
def register_dummy_host(self):
dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71"
self.api_client.register_host(self.id, dummy_host_id)
def host_get_next_step(self, host_id):
return self.api_client.host_get_next_step(self.id, host_id)
def host_post_step_result(self, host_id, step_type, step_id, exit_code, output):
self.api_client.host_post_step_result(
self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output
)
def host_update_install_progress(self, host_id, current_stage, progress_info=None):
self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info)
def host_complete_install(self):
self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True)
def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig):
self._infra_env = InfraEnv.generate(
self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type
)
self._infra_env.download_image(iso_download_path=self._config.iso_download_path)
nodes.start_all()
self.wait_until_hosts_are_discovered()
return nodes.create_nodes_cluster_hosts_mapping(cluster=self)
def wait_for_cluster_validation(
self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_cluster_validation_in_status(
validation_section=validation_section, validation_id=validation_id, statuses=statuses
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Cluster validation to be in status {statuses}",
)
except BaseException:
log.error(
"Cluster validation status is: %s",
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
),
)
raise
def is_cluster_validation_in_status(self, validation_section, validation_id, statuses):
log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses)
try:
return (
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_host_validation(
self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_host_validation_in_status(
host_id=host_id,
validation_section=validation_section,
validation_id=validation_id,
statuses=statuses,
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Host validation to be in status {statuses}",
)
except BaseException:
log.error(
"Host validation status is: %s",
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
),
)
raise
def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses):
log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses)
try:
return (
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_cluster_to_be_in_installing_pending_user_action_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_cluster_to_be_in_installing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING],
timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT,
)
def wait_for_cluster_to_be_in_finalizing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED],
timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
break_statuses=[consts.ClusterStatus.ERROR],
)
def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
timeout=timeout,
)
@classmethod
def reset_cluster_and_wait_for_ready(cls, cluster):
# Reset cluster install
cluster.reset_install()
assert cluster.is_in_insufficient_status()
# Reboot required nodes into ISO
cluster.reboot_required_nodes_into_iso_after_reset()
# Wait for hosts to be rediscovered
cluster.wait_until_hosts_are_discovered()
cluster.wait_for_ready_to_install()
def get_events(self, host_id="", infra_env_id=""):
warnings.warn(
"Cluster.get_events is now deprecated, use EventsHandler.get_events instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.get_events(host_id, self.id, infra_env_id)
def _configure_load_balancer(self):
main_cidr = self.get_primary_machine_cidr()
secondary_cidr = self.nodes.controller.get_provisioning_cidr()
master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips(
self.api_client, self.id, secondary_cidr
)
worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr)
load_balancer_ip = str(IPNetwork(main_cidr).ip + 1)
tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder)
lb_controller = LoadBalancerController(tf)
lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)
@classmethod
def _get_namespace_index(cls, libvirt_network_if):
# Hack to retrieve namespace index - does not exist in tests
matcher = re.match(r"^tt(\d+)$", libvirt_network_if)
return int(matcher.groups()[0]) if matcher is not None else 0
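        # Illustrative examples: an interface named "tt3" yields namespace index 3,
        # while a non-matching name such as "virbr0" falls back to index 0.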
def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10):
warnings.warn(
"Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.wait_for_event(
event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout
)
@staticmethod
def get_inventory_host_nics_data(host: dict, ipv4_first=True):
def get_network_interface_ip(interface):
addresses = (
interface.ipv4_addresses + interface.ipv6_addresses
if ipv4_first
else interface.ipv6_addresses + interface.ipv4_addresses
)
return addresses[0].split("/")[0] if len(addresses) > 0 else None
inventory = models.Inventory(**json.loads(host["inventory"]))
interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces]
return [
{
"name": interface.name,
"model": interface.product,
"mac": interface.mac_address,
"ip": get_network_interface_ip(interface),
"speed": interface.speed_mbps,
}
for interface in interfaces_list
]
@staticmethod
def get_hosts_nics_data(hosts: list, ipv4_first=True):
return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts]
@staticmethod
def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]:
return [ClusterHost(h) for h in cluster.hosts]
@staticmethod
def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]:
return [ClusterHost(models.Host(**h)) for h in hosts]
def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cidrs = set()
for host in hosts:
ips = []
if self.nodes.is_ipv4:
ips += host.ipv4_addresses()
if self.nodes.is_ipv6:
ips += host.ipv6_addresses()
for host_ip in ips:
cidr = network_utils.get_cidr_by_interface(host_ip)
cidrs.add(cidr)
return cidrs
def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cluster_cidrs = self.get_cluster_cidrs(hosts)
matching_cidrs = set()
for cidr in cluster_cidrs:
for host in hosts:
interfaces = []
if self.nodes.is_ipv4:
interfaces += host.ipv4_addresses()
if self.nodes.is_ipv6:
interfaces += host.ipv6_addresses()
if not network_utils.any_interface_in_cidr(interfaces, cidr):
break
matching_cidrs.add(cidr)
return matching_cidrs
@staticmethod
def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True):
cluster_info = client.cluster_get(cluster_id).to_dict()
if len(cluster_info["hosts"]) == 0:
raise Exception("No host found")
network = IPNetwork(machine_cidr)
interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first)
for intf in interfaces:
ip = intf["ip"]
if IPAddress(ip) in network:
return ip
raise Exception("IP for single node not found")
@staticmethod
def get_ips_for_role(client, cluster_id, network, role):
cluster_info = client.cluster_get(cluster_id).to_dict()
ret = []
net = IPNetwork(network)
hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role])
for host_interfaces in hosts_interfaces:
for intf in host_interfaces:
ip = IPAddress(intf["ip"])
if ip in net:
ret = ret + [intf["ip"]]
return ret
@staticmethod
def get_master_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER)
@staticmethod
def get_worker_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER)
@staticmethod
def get_vips_from_cluster(client, cluster_id):
cluster_info = client.cluster_get(cluster_id)
return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip)
def get_host_disks(self, host, filter=None):
hosts = self.get_hosts()
selected_host = [h for h in hosts if h["id"] == host["id"]]
disks = json.loads(selected_host[0]["inventory"])["disks"]
if not filter:
return [disk for disk in disks]
else:
return [disk for disk in disks if filter(disk)]
def get_inventory_host_ips_data(self, host: dict):
nics = self.get_inventory_host_nics_data(host)
return [nic["ip"] for nic in nics]
    # Needed for the None platform and for single-node clusters:
    # we need the IP on which the API is actually running.
def get_kube_api_ip(self, hosts):
for host in hosts:
for ip in self.get_inventory_host_ips_data(host):
if self.is_kubeapi_service_ready(ip):
return ip
def get_api_vip(self, cluster):
cluster = cluster or self.get_details()
api_vip = cluster.api_vip
if not api_vip and cluster.user_managed_networking:
log.info("API VIP is not set, searching for api ip on masters")
masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"])
api_vip = self._wait_for_api_vip(masters)
log.info("api vip is %s", api_vip)
return api_vip
def _wait_for_api_vip(self, hosts, timeout=180):
"""Enable some grace time for waiting for API's availability."""
return waiting.wait(
lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP"
)
def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]:
        # Look for the node that matches the given host by its MAC address (which is unique)
for node in nodes:
for mac in node.macs:
if mac.lower() in host.macs():
return node.name
# IPv6 static ips
if self._config.is_static_ip:
mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder)
for mac in host.macs():
for name, macs in mappings.items():
if mac in macs:
return name
return None
@staticmethod
def is_kubeapi_service_ready(ip_or_dns):
"""Validate if kube-api is ready on given address."""
with contextlib.suppress(ValueError):
# IPv6 addresses need to be surrounded with square-brackets
# to differentiate them from domain names
if ipaddress.ip_address(ip_or_dns).version == 6:
ip_or_dns = f"[{ip_or_dns}]"
try:
response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1)
return response.ok
except BaseException:
return False
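        # Usage sketch (the addresses are illustrative): is_kubeapi_service_ready("192.168.127.10")
        # probes https://192.168.127.10:6443/readyz, while an IPv6 address such as
        # "fd2e:6f44:5dd8::14" is probed as https://[fd2e:6f44:5dd8::14]:6443/readyz.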
def wait_and_kill_installer(self, host):
# Wait for specific host to be in installing in progress
self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
# Kill installer to simulate host error
selected_node = self.nodes.get_node_from_cluster_host(host)
selected_node.kill_installer()
def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
import warnings
from tests.config import ClusterConfig, InfraEnvConfig
warnings.warn(
"Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
"this function. The function and solution for that case have not been determined yet. It might be "
"on another module, or as a classmethod within Cluster class."
" For more information see https://issues.redhat.com/browse/MGMT-4975",
PendingDeprecationWarning,
)
if isinstance(cluster_info, dict):
cluster_info = models.cluster.Cluster(**cluster_info)
cluster = Cluster(
api_client=api_client,
infra_env_config=InfraEnvConfig(),
config=ClusterConfig(
cluster_name=ClusterName(cluster_info.name),
pull_secret=pull_secret,
ssh_public_key=cluster_info.ssh_public_key,
cluster_id=cluster_info.id,
),
nodes=None,
)
return cluster.get_api_vip(cluster=cluster_info)
| 42.450853 | 120 | 0.679813 | [
"Apache-2.0"
] | empovit/assisted-test-infra | discovery-infra/test_infra/helper_classes/cluster.py | 52,257 | Python |
"""
Purpose: Unsupervised learning sampler
Date created: 2020-11-06
Ref repo: https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries
Local folder: C:/Users/Work1/Desktop/Info/GitHub/python-examples-main/notebook-samples/unsupervised
Contributor(s):
Mark M.
"""
import os
from pathlib import Path
# Set local folder if developing/debugging
myuser = os.environ["username"]
PROJECT_FOLDER = Path(rf"C:\Users\{myuser}\Desktop\Info\GitHub\python-examples-main\notebook-samples\unsupervised")
os.chdir(PROJECT_FOLDER)
from UnsupervisedTSRepo import scikit_wrappers
import gc
import zipfile
import requests
from io import BytesIO, StringIO
# Data science and data-processing imports
import scipy as sp
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn import cluster
from sklearn import neighbors
import torch
import torch.nn as nn
import torch.optim as optim
pd.set_option("mode.chained_assignment", None)
pd.set_option("display.width", 120)
pd.set_option("display.date_yearfirst", True)
pd.set_option("display.max_colwidth", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_info_rows", 10000)
gc.enable()
# Check for CUDA
CUDA_TF: bool = False
if torch.cuda.is_available():
print("Using CUDA...")
CUDA_TF = True
GPU = 0
zip_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
def import_zipfile_data(URL = zip_url):
with requests.Session() as s:
tmp = s.get(URL)
with zipfile.ZipFile(BytesIO(tmp.content)) as zfo:
with zfo.open("household_power_consumption.txt") as zfi:
tmp = StringIO(zfi.read().decode("utf-8"))
data_ = pd.read_csv(tmp, sep=";", decimal=",", header=0, low_memory=False)
del tmp
return data_
data = import_zipfile_data(zip_url)
data.loc[:, "Date"] = pd.to_datetime(data.loc[:, "Date"], yearfirst=True)
data.loc[:, "Time"] = pd.to_datetime(data.loc[:, "Time"], format="%H:%M:%S").dt.time
#dataset = data.transpose(pd.array(data))[2].reshape(1, 1, -1)
# Update missing values with the "last seen" (forward-filled) value.
# This probably works better for time series than for other data,
# since order matters here.
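# Per the UCI household power consumption layout (Date, Time, Global_active_power, ...),
# column index 2 below should be the Global_active_power series, reshaped to (1, 1, T).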
dataset = np.transpose(np.array(data))[2].reshape(1, 1, -1)
for idx in range(np.shape(dataset)[2]):
if dataset[0, 0, idx] == "?":
dataset[0, 0, idx] = dataset[0, 0, idx - 1]
dataset = dataset.astype(np.float32)
# Create training and testing sets
train = dataset[:, :, :500000]
test = dataset[:, :, 500000:]
# Normalization
mu_ = np.mean(dataset)
sigma_ = np.std(dataset)
normalize = lambda d, mean, sigma: (d - mean) / sigma
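# e.g. normalize(np.array([1.0, 2.0, 3.0]), 2.0, 1.0) -> array([-1., 0., 1.])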
dataset = normalize(dataset, mu_, sigma_)
train = normalize(train, mu_, sigma_)
test = normalize(test, mu_, sigma_)
print(f"Normalized data set metrics:\n\tMean: {np.mean(dataset)}\n\tVariance: {np.var(dataset)}")
# Feature learning
# Train new model?
training = True
model_path = PROJECT_FOLDER.joinpath(r"data\HouseholdPowerConsumption_yearly")
# hyperparams = {
# "batch_size": 1,
# "channels": 30,
# "compared_length": None,
# "depth": 10,
# "nb_steps": 400,
# "in_channels": 1,
# "kernel_size": 3,
# "penalty": None,
# "early_stopping": None,
# "lr": 0.001,
# "nb_random_samples": 10,
# "negative_penalty": 1,
# "out_channels": 160,
# "reduced_size": 80,
# "cuda": CUDA_TF,
# "gpu": GPU
# }
# encoder_yearly = scikit_wrappers.CausalCNNEncoderClassifier()
# encoder_yearly.set_params(**hyperparams)
# if training:
# encoder_yearly.fit_encoder(train, save_memory=True, verbose=True)
# encoder_yearly.save_encoder(model_path.as_posix())
# else:
# encoder_yearly.load_encoder(model_path.as_posix())
torch.cuda.empty_cache()
"""" For local zipfile data
from io import StringIO
with zipfile.ZipFile("household_power_consumption.zip") as zfo:
with zfo.open("household_power_consumption.txt") as zfi:
tmp = StringIO(zfi.read().decode("utf-8"))
data = pd.read_csv(tmp, sep=";", decimal=",", header=0, low_memory=False)
del tmp
"""
"""
import hmac
import pickle
import hashlib
import binascii
def create_sha256_signature(key, message):
byte_key = binascii.unhexlify(key)
message = message.encode()
return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()
create_sha256_signature("E49756B4C8FAB4E48222A3E7F3B97CC3", "TEST STRING")
"""
| 23.978836 | 115 | 0.703442 | [
"MIT"
] | MarkMoretto/python-examples-main | notebook-samples/unsupervised/pred_electricity_consumption.py | 4,532 | Python |
"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from io import BufferedReader, BytesIO
import pytest
import requests
import responses
from flexmock import flexmock
from atomic_reactor.util import get_retrying_requests_session
from atomic_reactor.utils.pnc import PNCUtil
PNC_BASE_API_URL = 'http://pnc.localhost/pnc-rest/v2'
PNC_GET_SCM_ARCHIVE_PATH = 'builds/{}/scm-archive'
def mock_pnc_map():
return {'base_api_url': PNC_BASE_API_URL,
'get_scm_archive_path': PNC_GET_SCM_ARCHIVE_PATH}
@pytest.mark.usefixtures('user_params')
class TestGetSCMArchiveFromBuildID(object):
@responses.activate
def test_connection_filename_in_header(self):
build_id = '1234'
filename = 'source.tar.gz'
scm_url = f'https://code.example.com/{filename};sf=tgz'
content = b'abc'
reader = BufferedReader(BytesIO(content), buffer_size=1)
# to mock this URL we have to construct it manually first
get_scm_archive_request_url = PNC_BASE_API_URL + '/' + PNC_GET_SCM_ARCHIVE_PATH
responses.add(responses.GET, get_scm_archive_request_url.format(build_id), body=reader,
status=302, headers={'Location': scm_url})
responses.add(responses.HEAD, scm_url, body='', status=200,
headers={'Content-disposition': f'filename="{filename}"'})
pnc_util = PNCUtil(mock_pnc_map())
url, dest_filename = pnc_util.get_scm_archive_from_build_id(build_id)
assert url == scm_url
assert dest_filename == filename
@responses.activate
def test_connection_filename_in_url(self):
build_id = '1234'
filename = 'source.tar.gz'
scm_url = f'https://code.example.com/{filename}'
# to mock this URL we have to construct it manually first
get_scm_archive_request_url = PNC_BASE_API_URL + '/' + PNC_GET_SCM_ARCHIVE_PATH
responses.add(responses.GET, get_scm_archive_request_url.format(build_id), body='',
status=302, headers={'Location': scm_url})
pnc_util = PNCUtil(mock_pnc_map())
url, dest_filename = pnc_util.get_scm_archive_from_build_id(build_id)
assert url == scm_url
assert dest_filename == filename
def test_connection_failure(self):
build_id = '1234'
session = get_retrying_requests_session()
(flexmock(session)
.should_receive('get')
.and_raise(requests.exceptions.RetryError))
pnc_util = PNCUtil(mock_pnc_map(), session)
with pytest.raises(requests.exceptions.RetryError):
pnc_util.get_scm_archive_from_build_id(build_id)
| 33.457831 | 95 | 0.695355 | [
"BSD-3-Clause"
] | hjmodi/atomic-reactor | tests/utils/test_pnc.py | 2,777 | Python |
class Beam(object):
def __init__(self):
super().__init__()
def get_number_of_rays(self):
raise NotImplementedError("method is abstract")
def get_rays(self):
raise NotImplementedError("method is abstract")
def get_ray(self, ray_index):
raise NotImplementedError("method is abstract")
def duplicate(self):
raise NotImplementedError("method is abstract")
def merge(self, other_beam):
raise NotImplementedError("method is abstract")
| 23.136364 | 55 | 0.675835 | [
"MIT"
] | oasys-kit/rafry | rafry/raytracer/beam.py | 509 | Python |
from .hflip import hflip
from .resize import resize
from .pad import pad
from .random_crop import random_crop
from .to_tensor import to_tensor
from .random_erasing import random_erasing
from .random_sized_rect_crop import random_sized_rect_crop
def transforms(item, cfg, mode):
"""
:param item: sample = deepcopy(self.items[index])
:param cfg: cfg
:return:
eval() transform str to list, dict, tuple. Here is a series of the transform methods in turn.
"""
transforms_dataset_factory = {
'train': cfg.dataset.train,
'test': cfg.dataset.test
}
if transforms_dataset_factory[mode].before_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if transforms_dataset_factory[mode].after_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item
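# Usage sketch (the cfg values are illustrative; the attribute layout is taken from the code above):
# with cfg.dataset.train.before_to_tensor_transform_list = ['hflip', 'random_crop'] and
# cfg.dataset.train.after_to_tensor_transform_list = ['random_erasing'],
# transforms(item, cfg, 'train') applies hflip -> random_crop -> to_tensor -> random_erasing in turn.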
| 34.741935 | 97 | 0.717734 | [
"MIT"
] | nickhuang1996/HJL-re-id | MDRSREID/utils/data_utils/transforms/torch_transforms/__init__.py | 1,077 | Python |
import torch
import torch.nn as nn
import math
from arcface.utils import l2_norm, Flatten, SentVec_TFIDF
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
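# How these modules fit together (CBAM-style attention, as used by the blocks below):
# ChannelAttention produces an (N, C, 1, 1) gate from average- and max-pooled descriptors,
# SpatialAttention produces an (N, 1, H, W) gate from channel-wise mean/max maps, and the
# residual blocks multiply their features by the channel gate first, then the spatial gate.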
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.ca = ChannelAttention(planes)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.ca = ChannelAttention(planes * 4)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
MODEL = {
50:{
'layers': [3, 4, 6, 3]
},
101:{
'layers': [3, 4, 23, 3]
},
152:{
'layers': [3, 8, 36, 3]
}
}
class ResNetCBAM(nn.Module):
def __init__(self, config):
super(ResNetCBAM, self).__init__()
embedding_size = config.embedding_size
drop_ratio = config.drop_ratio
model_dic = MODEL[config.num_layers_c]
layers = model_dic['layers']
# embedding_size = 2048
# drop_ratio = 0.1
# layers = [3, 4, 23, 3]
# self.sentvec = SentVec_TFIDF(embedding_size=embedding_size, root_dir='data/')
block = Bottleneck
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
# self.avgpool = nn.AvgPool2d(4, stride=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512* block.expansion, 1000)
self.bn_last = nn.BatchNorm1d(embedding_size)
self.bn_last.bias.requires_grad_(False)
# self.output_layer = nn.Sequential(
# nn.BatchNorm2d(512 * block.expansion),
# nn.Dropout(drop_ratio),
# Flatten(),
# nn.Linear(512 * block.expansion, embedding_size),
# nn.BatchNorm1d(embedding_size))
# self.last_layer = nn.Sequential(
# nn.Linear(2*embedding_size, embedding_size),
# nn.BatchNorm1d(embedding_size)
# )
'''if not config.resume:
self._initialize_weights()
'''
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
# print(x.size())
x = self.layer2(x)
# print(x.size())
x = self.layer3(x)
# print(x.size())
x = self.layer4(x)
# print(x.size())
x = self.avgpool(x)
# x = self.output_layer(x)
# sent = self.sentvec(text)
# x = torch.cat((x, sent), dim=1)
# x = self.last_layer(x)
x = torch.flatten(x, 1)
if self.training:
return x, self.bn_last(x)
else:
return l2_norm(self.bn_last(x))
if __name__ == "__main__":
net = ResNetCBAM('aa')
net.load_state_dict(torch.load('trained_models/resnet_cbam_101.pth'))
# del net.output_layer
# net.bn_last = nn.BatchNorm1d(2048)
# l = [3, 4, 6, 3]
# for i in range(3):
# net.layer1[i].ca = ChannelAttention(64 * 4)
# net.layer1[i].sa = SpatialAttention()
# for i in range(4):
# net.layer2[i].ca = ChannelAttention(64 * 8)
# net.layer2[i].sa = SpatialAttention()
# for i in range(6):
# net.layer3[i].ca = ChannelAttention(64 * 16)
# net.layer3[i].sa = SpatialAttention()
# for i in range(3):
# net.layer4[i].ca = ChannelAttention(64 * 32)
# net.layer4[i].sa = SpatialAttention()
# # net.sentvec = SentVec_TFIDF(embedding_size=512, root_dir='data/')
# net.output_layer = nn.Sequential(
# nn.BatchNorm2d(512* 4),
# nn.Dropout(0.1),
# Flatten(),
# nn.Linear(512 * 4, 4096),
# nn.BatchNorm1d(4096))
# del net.fc
torch.save(net.state_dict(), 'trained_models/resnet_cbam_101.pth')
a = torch.randn(5,3,224,224)
b = net(a)
print(b[0].size()) | 33.618881 | 87 | 0.562142 | [
"MIT"
] | DerryHub/the-TaobaoLive-Commodity-Identify-Competition | arcface/resnet_cbam.py | 9,615 | Python |
# Automatically generated by pb2py
# fmt: off
if False:
from typing_extensions import Literal
Transfer = 0 # type: Literal[0]
RegisterSecondPassphrase = 1 # type: Literal[1]
RegisterDelegate = 2 # type: Literal[2]
CastVotes = 3 # type: Literal[3]
RegisterMultisignatureAccount = 4 # type: Literal[4]
CreateDapp = 5 # type: Literal[5]
TransferIntoDapp = 6 # type: Literal[6]
TransferOutOfDapp = 7 # type: Literal[7]
| 30.571429 | 53 | 0.721963 | [
"MIT"
] | ph4r05/monero-agent | monero_glue/messages/LiskTransactionType.py | 428 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import inspect
import setuptools
from setuptools.command.test import test as TestCommand
from setuptools import setup
if sys.version_info < (3, 4, 0):
sys.stderr.write('FATAL: This script needs to be run with Python 3.4+\n')
sys.exit(1)
__location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())))
def read_version(package):
data = {}
with open(os.path.join(package, '__init__.py'), 'r') as fd:
exec(fd.read(), data)
return data['__version__']
NAME = 'github-maintainer'
MAIN_PACKAGE = 'github_maintainer'
VERSION = read_version(MAIN_PACKAGE)
DESCRIPTION = 'CLI support tool for GitHub repo maintainers'
LICENSE = 'Apache License 2.0'
URL = 'https://github.com/zalando-stups/github-maintainer-cli'
AUTHOR = 'Henning Jacobs'
EMAIL = '[email protected]'
COVERAGE_XML = True
COVERAGE_HTML = False
JUNIT_XML = True
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
]
CONSOLE_SCRIPTS = ['github-maintainer = github_maintainer.cli:main']
class PyTest(TestCommand):
user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=',
None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.cov = None
self.cov_xml = False
self.cov_html = False
self.junitxml = None
def finalize_options(self):
TestCommand.finalize_options(self)
if self.cov is not None:
self.cov = ['--cov', self.cov, '--cov-report', 'term-missing']
if self.cov_xml:
self.cov.extend(['--cov-report', 'xml'])
if self.cov_html:
self.cov.extend(['--cov-report', 'html'])
if self.junitxml is not None:
self.junitxml = ['--junitxml', self.junitxml]
def run_tests(self):
try:
import pytest
except:
raise RuntimeError('py.test is not installed, run: pip install pytest')
params = {'args': self.test_args}
if self.cov:
params['args'] += self.cov
if self.junitxml:
params['args'] += self.junitxml
params['args'] += ['--doctest-modules', MAIN_PACKAGE, '-s']
errno = pytest.main(**params)
sys.exit(errno)
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
    return [req for req in content.split('\n') if req != '']
def read(fname):
return open(os.path.join(__location__, fname), encoding='utf-8').read()
def setup_package():
# Assemble additional setup commands
cmdclass = {}
cmdclass['test'] = PyTest
install_reqs = get_install_requirements('requirements.txt')
command_options = {'test': {'test_suite': ('setup.py', 'tests'), 'cov': ('setup.py', MAIN_PACKAGE)}}
if JUNIT_XML:
command_options['test']['junitxml'] = 'setup.py', 'junit.xml'
if COVERAGE_XML:
command_options['test']['cov_xml'] = 'setup.py', True
if COVERAGE_HTML:
command_options['test']['cov_html'] = 'setup.py', True
setup(
name=NAME,
version=VERSION,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
keywords='github git project maintainer',
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=install_reqs,
setup_requires=['six', 'flake8'],
cmdclass=cmdclass,
tests_require=['pytest-cov', 'pytest'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS},
)
if __name__ == '__main__':
setup_package()
| 31.514286 | 114 | 0.636673 | [
"Apache-2.0"
] | hjacobs/github-maintainer-cli | setup.py | 4,412 | Python |
from mopidy import backend
import pykka
from mopidy_funkwhale import api, client, library, playback, playlists
class FunkwhaleBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(FunkwhaleBackend, self).__init__()
self.api = api.FunkwhaleApi(config)
self.client = client.FunkwhaleClient(self.api)
self.audio = audio
self.library = library.FunkwhaleLibraryProvider(backend=self)
self.playback = playback.FunkwhalePlaybackProvider(audio=audio,
backend=self)
self.playlists = playlists.FunkwhalePlaylistsProvider(backend=self)
self.uri_schemes = ['funkwhale']
def on_start(self):
self.api.login()
| 35.181818 | 75 | 0.665375 | [
"Apache-2.0"
] | gjabell/mopidy-funkwhale | mopidy_funkwhale/backend.py | 774 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from dve.io.table import TableDataBase
from jhunt.qt.widgets.mainwindow import MainWindow
import datetime
from PyQt5.QtWidgets import QApplication
APPLICATION_NAME = "JHunt"
def main():
adverts_file_name = ".jhunt_adverts"
adverts_data_schema = [
{"header": "Date", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False},
{"header": "Score", "default_value": int(0), "dtype": int, "mapped": False, "min_value": 0, "max_value": 5},
{"header": "Application", "default_value": False, "dtype": bool, "mapped": False},
{"header": "Category", "default_value": "Entreprise", "dtype": str, "mapped": False, "values": ("Entreprise", "IR/IE", "PostDoc")},
{"header": "Organization", "default_value": "", "dtype": str, "mapped": False},
{"header": "Ref.", "default_value": "", "dtype": str, "mapped": False},
{"header": "Title", "default_value": "", "dtype": str, "mapped": False},
{"header": "URL", "default_value": "", "dtype": str, "mapped": True, "widget": "QLineEdit"},
{"header": "Pros", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "Cons", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "Description", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"}
]
adverts_database = TableDataBase(adverts_data_schema, adverts_file_name)
websites_file_name = ".jhunt_websites"
websites_data_schema = [
{"header": "Date", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False, "hidden": True},
{"header": "Name", "default_value": "", "dtype": str, "mapped": False},
{"header": "Score", "default_value": int(0), "dtype": int, "mapped": False, "min_value": 0, "max_value": 3},
{"header": "Category", "default_value": "Private Company", "dtype": str, "mapped": False, "values": ("Private Company", "Public Research", "School", "Search Engine")},
{"header": "Last visit", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False},
{"header": "Today status", "default_value": "None", "dtype": str, "mapped": False, "values": ("None", "Partial", "Full")},
{"header": "Description", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "URL", "default_value": "", "dtype": str, "mapped": True, "widget": "QLineEdit"}
]
websites_database = TableDataBase(websites_data_schema, websites_file_name)
adverts_data = adverts_database.load() # TODO ?
websites_data = websites_database.load() # TODO ?
app = QApplication(sys.argv)
app.setApplicationName(APPLICATION_NAME)
# Make widgets
window = MainWindow(adverts_data, websites_data)
    # The main loop of the application; event handling starts from this point.
    # The exec_() method has a trailing underscore because exec is a Python keyword.
exit_code = app.exec_()
adverts_database.save(adverts_data) # TODO ?
websites_database.save(websites_data) # TODO ?
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
if __name__ == '__main__':
main()
| 57.520548 | 201 | 0.526554 | [
"MIT"
] | jeremiedecock/jhunt | jhunt/qt/main.py | 4,199 | Python |
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been copied from
# https://github.com/mlcommons/inference/blob/r0.7/vision/medical_imaging/3d-unet/preprocess.py
import argparse
import numpy as np
import os
import pickle
import sys
import torch
from batchgenerators.augmentations.utils import pad_nd_image
from batchgenerators.utilities.file_and_folder_operations import subfiles
from nnunet.training.model_restore import load_model_and_checkpoint_files
from nnunet.inference.predict import preprocess_multithreaded
def preprocess_MLPerf(model, checkpoint_name, folds, fp16, list_of_lists, output_filenames, preprocessing_folder, num_threads_preprocessing):
assert len(list_of_lists) == len(output_filenames)
print("loading parameters for folds", folds)
trainer, params = load_model_and_checkpoint_files(model, folds, fp16, checkpoint_name=checkpoint_name)
print("starting preprocessing generator")
preprocessing = preprocess_multithreaded(trainer, list_of_lists, output_filenames, num_threads_preprocessing, None)
print("Preprocessing images...")
all_output_files = []
for preprocessed in preprocessing:
output_filename, (d, dct) = preprocessed
all_output_files.append(output_filename)
if isinstance(d, str):
data = np.load(d)
os.remove(d)
d = data
# Pad to the desired full volume
d = pad_nd_image(d, trainer.patch_size, "constant", None, False, None)
with open(os.path.join(preprocessing_folder, output_filename+ ".pkl"), "wb") as f:
pickle.dump([d, dct], f)
f.close()
return all_output_files
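# Each case preprocessed above is written as a pickle holding [padded_volume, properties_dict];
# preprocess_setup() below records the returned case names in preprocessed_files.pkl.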
def preprocess_setup(preprocessed_data_dir):
print("Preparing for preprocessing data...")
# Validation set is fold 1
fold = 1
validation_fold_file = '../models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/folds/fold1_validation.txt'
# Make sure the model exists
model_dir = 'build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1'
model_path = os.path.join(model_dir, "plans.pkl")
assert os.path.isfile(model_path), "Cannot find the model file {:}!".format(model_path)
checkpoint_name = "model_final_checkpoint"
# Other settings
fp16 = False
num_threads_preprocessing = 12
raw_data_dir = 'build/raw_data/nnUNet_raw_data/Task043_BraTS2019/imagesTr'
# Open list containing validation images from specific fold (e.g. 1)
validation_files = []
with open(validation_fold_file) as f:
for line in f:
validation_files.append(line.rstrip())
# Create output and preprocessed directory
if not os.path.isdir(preprocessed_data_dir):
os.makedirs(preprocessed_data_dir)
    # Create the list of image locations (i.e. 4 images per case => 4 modalities)
all_files = subfiles(raw_data_dir, suffix=".nii.gz", join=False, sort=True)
list_of_lists = [[os.path.join(raw_data_dir, i) for i in all_files if i[:len(j)].startswith(j) and
len(i) == (len(j) + 12)] for j in validation_files]
    # Preprocess images, returns a list of filenames
    # This runs in multiple processes
    print("Actually preprocessing data...")
preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16, list_of_lists,
validation_files, preprocessed_data_dir, num_threads_preprocessing)
print("Saving metadata of the preprocessed data...")
with open(os.path.join(preprocessed_data_dir, "preprocessed_files.pkl"), "wb") as f:
pickle.dump(preprocessed_files, f)
print("Preprocessed data saved to {:}".format(preprocessed_data_dir))
print("Done!")
| 40.934579 | 141 | 0.736073 | [
"Apache-2.0"
] | Alavandar08/models | models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/preprocess.py | 4,380 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class livecoin(Exchange):
def describe(self):
return self.deep_extend(super(livecoin, self).describe(), {
'id': 'livecoin',
'name': 'LiveCoin',
'countries': ['US', 'UK', 'RU'],
'rateLimit': 1000,
'userAgent': self.userAgents['chrome'],
'has': {
'fetchDepositAddress': True,
'fetchDeposits': True,
'CORS': False,
'fetchTickers': True,
'fetchCurrencies': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980768-f22fc424-638a-11e7-89c9-6010a54ff9be.jpg',
'api': 'https://api.livecoin.net',
'www': 'https://www.livecoin.net',
'doc': 'https://www.livecoin.net/api?lang=en',
'referral': 'https://livecoin.net/?from=Livecoin-CQ1hfx44',
},
'api': {
'public': {
'get': [
'exchange/all/order_book',
'exchange/last_trades',
'exchange/maxbid_minask',
'exchange/order_book',
'exchange/restrictions',
'exchange/ticker', # omit params to get all tickers at once
'info/coinInfo',
],
},
'private': {
'get': [
'exchange/client_orders',
'exchange/order',
'exchange/trades',
'exchange/commission',
'exchange/commissionCommonInfo',
'payment/balances',
'payment/balance',
'payment/get/address',
'payment/history/size',
'payment/history/transactions',
],
'post': [
'exchange/buylimit',
'exchange/buymarket',
'exchange/cancellimit',
'exchange/selllimit',
'exchange/sellmarket',
'payment/out/capitalist',
'payment/out/card',
'payment/out/coin',
'payment/out/okpay',
'payment/out/payeer',
'payment/out/perfectmoney',
'payment/voucher/amount',
'payment/voucher/make',
'payment/voucher/redeem',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.18 / 100,
'taker': 0.18 / 100,
},
},
'commonCurrencies': {
'BTCH': 'Bithash',
'CPC': 'Capricoin',
'CPT': 'Cryptos', # conflict with CPT = Contents Protocol https://github.com/ccxt/ccxt/issues/4920 and https://github.com/ccxt/ccxt/issues/6081
'EDR': 'E-Dinar Coin', # conflicts with EDR for Endor Protocol and EDRCoin
'eETT': 'EETT',
'FirstBlood': '1ST',
'FORTYTWO': '42',
'LEO': 'LeoCoin',
'ORE': 'Orectic',
'PLN': 'Plutaneum', # conflict with Polish Zloty
'RUR': 'RUB',
'SCT': 'SpaceCoin',
'TPI': 'ThaneCoin',
'wETT': 'WETT',
'XBT': 'Bricktox',
},
'exceptions': {
'exact': {
'1': ExchangeError,
'10': AuthenticationError,
'100': ExchangeError, # invalid parameters
'101': AuthenticationError,
'102': AuthenticationError,
'103': InvalidOrder, # invalid currency
'104': InvalidOrder, # invalid amount
'105': InvalidOrder, # unable to block funds
'11': AuthenticationError,
'12': AuthenticationError,
'2': AuthenticationError, # "User not found"
'20': AuthenticationError,
'30': AuthenticationError,
'31': NotSupported,
'32': ExchangeError,
'429': DDoSProtection,
'503': ExchangeNotAvailable,
},
'broad': {
'insufficient funds': InsufficientFunds, # https://github.com/ccxt/ccxt/issues/5749
'NOT FOUND': OrderNotFound,
'Cannot find order': OrderNotFound,
'Minimal amount is': InvalidOrder,
},
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetExchangeTicker(params)
restrictions = await self.publicGetExchangeRestrictions()
restrictionsById = self.index_by(restrictions['restrictions'], 'currencyPair')
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'symbol')
baseId, quoteId = id.split('/')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
coinRestrictions = self.safe_value(restrictionsById, symbol)
precision = {
'price': 5,
'amount': 8,
'cost': 8,
}
limits = {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
}
if coinRestrictions:
precision['price'] = self.safe_integer(coinRestrictions, 'priceScale', 5)
limits['amount']['min'] = self.safe_float(coinRestrictions, 'minLimitQuantity', limits['amount']['min'])
limits['price'] = {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetInfoCoinInfo(params)
currencies = self.safe_value(response, 'info')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.safe_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
walletStatus = self.safe_string(currency, 'walletStatus')
active = (walletStatus == 'normal')
name = self.safe_string(currency, 'name')
fee = self.safe_float(currency, 'withdrawFee')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(currency, 'minOrderAmount'),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': self.safe_float(currency, 'minOrderAmount'),
'max': None,
},
'withdraw': {
'min': self.safe_float(currency, 'minWithdrawAmount'),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_float(currency, 'minDepositAmount'),
'max': None,
},
},
}
result = self.append_fiat_currencies(result)
return result
def append_fiat_currencies(self, result):
precision = 8
defaults = {
'info': None,
'active': True,
'fee': None,
'precision': precision,
'limits': {
'withdraw': {'min': None, 'max': None},
'deposit': {'min': None, 'max': None},
'amount': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
},
}
currencies = [
{'id': 'USD', 'code': 'USD', 'name': 'US Dollar'},
{'id': 'EUR', 'code': 'EUR', 'name': 'Euro'},
# {'id': 'RUR', 'code': 'RUB', 'name': 'Russian ruble'},
]
currencies.append({
'id': 'RUR',
'code': self.safe_currency_code('RUR'),
'name': 'Russian ruble',
})
for i in range(0, len(currencies)):
currency = currencies[i]
code = currency['code']
result[code] = self.extend(defaults, currency)
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetPaymentBalances(params)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'total':
account['total'] = self.safe_float(balance, 'value')
if balance['type'] == 'available':
account['free'] = self.safe_float(balance, 'value')
if balance['type'] == 'trade':
account['used'] = self.safe_float(balance, 'value')
result[code] = account
return self.parse_balance(result)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateGetExchangeCommissionCommonInfo(params)
commission = self.safe_float(response, 'commission')
return {
'info': response,
'maker': commission,
'taker': commission,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
'groupByPrice': 'false',
}
if limit is not None:
request['depth'] = limit # 100
response = await self.publicGetExchangeOrderBook(self.extend(request, params))
timestamp = self.safe_integer(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
vwap = self.safe_float(ticker, 'vwap')
baseVolume = self.safe_float(ticker, 'volume')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetExchangeTicker(params)
tickers = self.index_by(response, 'symbol')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
ticker = await self.publicGetExchangeTicker(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "time": 1409935047,
# "id": 99451,
# "price": 350,
# "quantity": 2.85714285,
# "type": "BUY"
# }
#
# fetchMyTrades(private)
#
# {
# "datetime": 1435844369,
# "id": 30651619,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.1,
# "commission": 0,
# "clientorderid": 1472837650
# }
timestamp = self.safe_timestamp_2(trade, 'time', 'datetime')
fee = None
feeCost = self.safe_float(trade, 'commission')
if feeCost is not None:
feeCurrency = market['quote'] if market else None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'clientorderid')
id = self.safe_string(trade, 'id')
side = self.safe_string_lower(trade, 'type')
amount = self.safe_float(trade, 'quantity')
price = self.safe_float(trade, 'price')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
# orderDesc': 'true', # or 'false', if True then new orders will be first, otherwise old orders will be first.
# 'offset': 0, # page offset, position of the first item on the page
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetExchangeTrades(self.extend(request, params))
#
# [
# {
# "datetime": 1435844369,
# "id": 30651619,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.1,
# "commission": 0,
# "clientorderid": 1472837650
# },
# {
# "datetime": 1435844356,
# "id": 30651618,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.2,
# "commission": 0.092,
# "clientorderid": 1472837651
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
response = await self.publicGetExchangeLastTrades(self.extend(request, params))
#
# [
# {
# "time": 1409935047,
# "id": 99451,
# "price": 350,
# "quantity": 2.85714285,
# "type": "BUY"
# },
# {
# "time": 1409934792,
# "id": 99450,
# "price": 350,
# "quantity": 0.57142857,
# "type": "SELL"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'orderId': id,
}
response = await self.privateGetExchangeOrder(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'OPEN': 'open',
'PARTIALLY_FILLED': 'open',
'EXECUTED': 'closed',
'CANCELLED': 'canceled',
'PARTIALLY_FILLED_AND_CANCELLED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = None
if 'lastModificationTime' in order:
timestamp = self.safe_string(order, 'lastModificationTime')
if timestamp is not None:
if timestamp.find('T') >= 0:
timestamp = self.parse8601(timestamp)
else:
timestamp = self.safe_integer(order, 'lastModificationTime')
# TODO currently not supported by livecoin
# trades = self.parse_trades(order['trades'], market, since, limit)
trades = None
status = self.parse_order_status(self.safe_string_2(order, 'status', 'orderStatus'))
symbol = None
if market is None:
marketId = self.safe_string(order, 'currencyPair')
marketId = self.safe_string(order, 'symbol', marketId)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
type = self.safe_string_lower(order, 'type')
side = None
if type is not None:
orderType = type.split('_')
type = orderType[0]
side = orderType[1]
price = self.safe_float(order, 'price')
# of the next two lines the latter overrides the former, if present in the order structure
remaining = self.safe_float(order, 'remainingQuantity')
remaining = self.safe_float(order, 'remaining_quantity', remaining)
amount = self.safe_float(order, 'quantity', remaining)
filled = None
if remaining is not None:
filled = amount - remaining
cost = None
if filled is not None and price is not None:
cost = filled * price
feeRate = self.safe_float(order, 'commission_rate')
feeCost = None
if cost is not None and feeRate is not None:
feeCost = cost * feeRate
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
return {
'info': order,
'id': order['id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
},
}
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['currencyPair'] = market['id']
if since is not None:
request['issuedFrom'] = int(since)
if limit is not None:
request['endRow'] = limit - 1
response = await self.privateGetExchangeClientOrders(self.extend(request, params))
result = []
rawOrders = []
if response['data']:
rawOrders = response['data']
for i in range(0, len(rawOrders)):
order = rawOrders[i]
result.append(self.parse_order(order, market))
return self.sort_by(result, 'timestamp')
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openClosed': 'OPEN',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openClosed': 'CLOSED',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePostExchange' + self.capitalize(side) + type
market = self.market(symbol)
request = {
'quantity': self.amount_to_precision(symbol, amount),
'currencyPair': market['id'],
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await getattr(self, method)(self.extend(request, params))
result = {
'info': response,
'id': str(response['orderId']),
}
success = self.safe_value(response, 'success')
if success:
result['status'] = 'open'
return result
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'currencyPair': market['id'],
}
response = await self.privatePostExchangeCancellimit(self.extend(request, params))
message = self.safe_string(response, 'message', self.json(response))
if 'success' in response:
if not response['success']:
raise InvalidOrder(message)
elif 'cancelled' in response:
if response['cancelled']:
return {
'status': 'canceled',
'info': response,
}
else:
raise OrderNotFound(message)
raise ExchangeError(self.id + ' cancelOrder() failed: ' + self.json(response))
async def withdraw(self, code, amount, address, tag=None, params={}):
        # Sometimes the response will be {key: null} for all keys.
# An example is if you attempt to withdraw more than is allowed when withdrawal fees are considered.
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
wallet = address
if tag is not None:
wallet += '::' + tag
request = {
'amount': self.decimal_to_precision(amount, TRUNCATE, currency['precision'], DECIMAL_PLACES),
'currency': currency['id'],
'wallet': wallet,
}
response = await self.privatePostPaymentOutCoin(self.extend(request, params))
id = self.safe_integer(response, 'id')
if id is None:
raise InsufficientFunds(self.id + ' insufficient funds to cover requested withdrawal amount post fees ' + self.json(response))
return {
'info': response,
'id': id,
}
def parse_transaction(self, transaction, currency=None):
# {
# "id": "c853093d5aa06df1c92d79c2...",(tx on deposits, address on withdrawals)
# "type": "DEPOSIT",
# "date": 1553186482676,
# "amount": 712.61266,
# "fee": 0,
# "fixedCurrency": "XVG",
# "taxCurrency": "XVG",
# "variableAmount": null,
# "variableCurrency": null,
# "external": "Coin",
# "login": "USERNAME",
# "externalKey": "....87diPBy......3hTtuwUT78Yi",(address on deposits, tx on withdrawals)
# "documentId": 1110662453
# },
txid = None
address = None
id = self.safe_string(transaction, 'documentId')
amount = self.safe_float(transaction, 'amount')
timestamp = self.safe_integer(transaction, 'date')
type = self.safe_string_lower(transaction, 'type')
currencyId = self.safe_string(transaction, 'fixedCurrency')
feeCost = self.safe_float(transaction, 'fee')
code = self.safe_currency_code(currencyId, currency)
if type == 'withdrawal':
txid = self.safe_string(transaction, 'externalKey')
address = self.safe_string(transaction, 'id')
elif type == 'deposit':
address = self.safe_string(transaction, 'externalKey')
txid = self.safe_string(transaction, 'id')
status = None
if type == 'deposit':
            status = 'ok'  # Deposits are not registered until they are in the account. Withdrawals are left as None; not entirely sure about their status.
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
        endtime = 2505600000 # 29 days - the exchange allows a maximum of 30 days.
now = self.milliseconds()
request = {
'types': 'DEPOSIT',
'end': now,
'start': int(since) if (since is not None) else now - endtime,
}
currency = None
if code is not None:
currency = self.currency(code)
if limit is not None:
request['limit'] = limit # default is 100
response = await self.privateGetPaymentHistoryTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
        endtime = 2505600000 # 29 days - the exchange allows a maximum of 30 days.
now = self.milliseconds()
request = {
'types': 'WITHDRAWAL',
'end': now,
'start': int(since) if (since is not None) else now - endtime,
}
currency = None
if code is not None:
currency = self.currency(code)
if limit is not None:
request['limit'] = limit # default is 100
if since is not None:
request['start'] = since
response = await self.privateGetPaymentHistoryTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_deposit_address(self, currency, params={}):
request = {
'currency': currency,
}
response = await self.privateGetPaymentGetAddress(self.extend(request, params))
address = self.safe_string(response, 'wallet')
tag = None
if address.find(':') >= 0:
parts = address.split(':')
address = parts[0]
tag = parts[2]
self.check_address(address)
return {
'currency': currency,
'address': address,
'tag': tag,
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
query = self.urlencode(self.keysort(params))
if method == 'GET':
if params:
url += '?' + query
if api == 'private':
self.check_required_credentials()
if method == 'POST':
body = query
signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha256)
headers = {
'Api-Key': self.apiKey,
'Sign': signature.upper(),
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
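    # Note on sign() above (illustrative summary, not part of the exchange
    # logic): private endpoints are authenticated by sorting the query
    # parameters, signing the urlencoded query with HMAC-SHA256 using the API
    # secret, and sending the uppercased hex digest in the 'Sign' header next
    # to 'Api-Key'.  GET requests carry the query in the URL, while private
    # POST requests carry it in the form-encoded body.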
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if code >= 300:
feedback = self.id + ' ' + body
exact = self.exceptions['exact']
errorCode = self.safe_string(response, 'errorCode')
if errorCode in exact:
raise exact[errorCode](feedback)
else:
raise ExchangeError(feedback)
# returns status code 200 even if success == False
success = self.safe_value(response, 'success', True)
if not success:
feedback = self.id + ' ' + body
broad = self.exceptions['broad']
message = self.safe_string_2(response, 'message', 'exception')
if message is not None:
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback)
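# Minimal standalone sketch (illustration only, not part of the exchange class
# above): withdraw() and fetch_deposit_address() encode an optional tag into
# the wallet string as "address::tag", which is why split(':') leaves the tag
# at index 2 with an empty element in between.
def _encode_wallet_demo(address, tag=None):
    # Mirrors the join performed in withdraw().
    return (address + '::' + tag) if tag is not None else address
def _decode_wallet_demo(wallet):
    # Mirrors the split performed in fetch_deposit_address().
    if wallet.find(':') >= 0:
        parts = wallet.split(':')
        return parts[0], parts[2]
    return wallet, None
# e.g. _decode_wallet_demo('SOMEADDRESS::12345') -> ('SOMEADDRESS', '12345')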
| 39.406659 | 160 | 0.50264 | [
"MIT"
] | Evan-carry-you/ccxt | python/ccxt/async_support/livecoin.py | 33,141 | Python |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2015-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import functools
import typing
from edb.lang.common import ast
from edb.lang.schema import basetypes as s_basetypes
from edb.lang.schema import inheriting as s_inh
from edb.lang.schema import name as s_name
from edb.lang.schema import objects as s_obj
from edb.lang.schema import types as s_types
from edb.lang.schema import utils as s_utils
from edb.lang.edgeql import ast as qlast
from edb.lang.edgeql import errors as ql_errors
from edb.lang.ir import ast as irast
def is_polymorphic_type(t):
if isinstance(t, s_types.Collection):
return any(is_polymorphic_type(st) for st in t.get_subtypes())
else:
return t.name == 'std::any'
def amend_empty_set_type(es: irast.EmptySet, t: s_obj.Object, schema) -> None:
alias = es.path_id.target.name.name
scls_name = s_name.Name(module='__expr__', name=alias)
scls = t.__class__(name=scls_name, bases=[t])
scls.acquire_ancestor_inheritance(schema)
es.path_id = irast.PathId(scls)
es.scls = t
def _infer_common_type(irs: typing.List[irast.Base], schema):
if not irs:
raise ql_errors.EdgeQLError(
'cannot determine common type of an empty set',
context=irs[0].context)
col_type = None
arg_types = []
empties = []
for i, arg in enumerate(irs):
if isinstance(arg, irast.EmptySet) and arg.scls is None:
empties.append(i)
continue
arg_type = infer_type(arg, schema)
arg_types.append(arg_type)
if isinstance(arg_type, s_types.Collection):
col_type = arg_type
if not arg_types:
raise ql_errors.EdgeQLError(
'cannot determine common type of an empty set',
context=irs[0].context)
if col_type is not None:
if not all(col_type.issubclass(t) for t in arg_types):
raise ql_errors.EdgeQLError(
'cannot determine common type',
context=irs[0].context)
common_type = col_type
else:
common_type = s_utils.get_class_nearest_common_ancestor(arg_types)
for i in empties:
amend_empty_set_type(irs[i], common_type, schema)
return common_type
@functools.singledispatch
def _infer_type(ir, schema):
return
@_infer_type.register(type(None))
def __infer_none(ir, schema):
# Here for debugging purposes.
raise ValueError('invalid infer_type(None, schema) call')
@_infer_type.register(irast.Statement)
def __infer_statement(ir, schema):
return infer_type(ir.expr, schema)
@_infer_type.register(irast.Set)
def __infer_set(ir, schema):
return ir.scls
@_infer_type.register(irast.FunctionCall)
def __infer_func_call(ir, schema):
rtype = ir.func.returntype
if is_polymorphic_type(rtype):
# Polymorphic function, determine the result type from
# the argument type.
if isinstance(rtype, s_types.Tuple):
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
stypes = collections.OrderedDict(rtype.element_types)
for sn, st in stypes.items():
if is_polymorphic_type(st):
stypes[sn] = arg_type
break
return rtype.from_subtypes(stypes, rtype.get_typemods())
elif isinstance(rtype, s_types.Collection):
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
stypes = list(rtype.get_subtypes())
for si, st in enumerate(stypes):
if is_polymorphic_type(st):
stypes[si] = arg_type
break
return rtype.from_subtypes(stypes, rtype.get_typemods())
else:
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
if isinstance(arg_type, s_types.Collection):
stypes = list(arg_type.get_subtypes())
return stypes[-1]
else:
return rtype
@_infer_type.register(irast.Constant)
@_infer_type.register(irast.Parameter)
def __infer_const_or_param(ir, schema):
return ir.type
@_infer_type.register(irast.Coalesce)
def __infer_coalesce(ir, schema):
result = _infer_common_type([ir.left, ir.right], schema)
if result is None:
raise ql_errors.EdgeQLError(
'coalescing operator must have operands of related types',
context=ir.context)
return result
@_infer_type.register(irast.SetOp)
def __infer_setop(ir, schema):
left_type = infer_type(ir.left, schema).material_type()
right_type = infer_type(ir.right, schema).material_type()
# for purposes of type inference UNION and UNION ALL work almost
# the same way
if ir.op == qlast.UNION:
if left_type.issubclass(right_type):
result = left_type
elif right_type.issubclass(left_type):
result = right_type
else:
result = s_inh.create_virtual_parent(
schema, [left_type, right_type])
else:
result = infer_type(ir.left, schema)
# create_virtual_parent will raise if types are incompatible.
s_inh.create_virtual_parent(schema, [left_type, right_type])
return result
@_infer_type.register(irast.DistinctOp)
def __infer_distinctop(ir, schema):
result = infer_type(ir.expr, schema)
return result
def _infer_binop_args(left, right, schema):
if not isinstance(left, irast.EmptySet) or left.scls is not None:
left_type = infer_type(left, schema)
else:
left_type = None
if not isinstance(right, irast.EmptySet) or right.scls is not None:
right_type = infer_type(right, schema)
else:
right_type = None
if left_type is None and right_type is None:
raise ql_errors.EdgeQLError(
'cannot determine the type of an empty set',
context=left.context)
elif left_type is None:
amend_empty_set_type(left, right_type, schema)
left_type = right_type
elif right_type is None:
amend_empty_set_type(right, left_type, schema)
right_type = left_type
return left_type, right_type
@_infer_type.register(irast.BinOp)
def __infer_binop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
if isinstance(ir.op, (ast.ops.ComparisonOperator,
ast.ops.MembershipOperator)):
result = schema.get('std::bool')
else:
result = s_basetypes.TypeRules.get_result(
ir.op, (left_type, right_type), schema)
if result is None:
result = s_basetypes.TypeRules.get_result(
(ir.op, 'reversed'), (right_type, left_type), schema)
if result is None:
if right_type.implicitly_castable_to(left_type, schema):
right_type = left_type
elif left_type.implicitly_castable_to(right_type, schema):
left_type = right_type
result = s_basetypes.TypeRules.get_result(
(ir.op, 'reversed'), (right_type, left_type), schema)
if result is None:
raise ql_errors.EdgeQLError(
f'binary operator `{ir.op.upper()}` is not defined for types '
f'{left_type.name} and {right_type.name}',
context=ir.left.context)
return result
@_infer_type.register(irast.EquivalenceOp)
def __infer_equivop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
return schema.get('std::bool')
@_infer_type.register(irast.TypeCheckOp)
def __infer_typecheckop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
return schema.get('std::bool')
@_infer_type.register(irast.UnaryOp)
def __infer_unaryop(ir, schema):
result = None
operand_type = infer_type(ir.expr, schema)
if ir.op == ast.ops.NOT:
if operand_type.name == 'std::bool':
result = operand_type
else:
if ir.op not in {ast.ops.UPLUS, ast.ops.UMINUS}:
raise ql_errors.EdgeQLError(
f'unknown unary operator: {ir.op}',
context=ir.context)
result = s_basetypes.TypeRules.get_result(
ir.op, (operand_type,), schema)
if result is None:
raise ql_errors.EdgeQLError(
f'unary operator `{ir.op.upper()}` is not defined '
f'for type {operand_type.name}',
context=ir.context)
return result
@_infer_type.register(irast.IfElseExpr)
def __infer_ifelse(ir, schema):
if_expr_type = infer_type(ir.if_expr, schema)
else_expr_type = infer_type(ir.else_expr, schema)
result = s_utils.get_class_nearest_common_ancestor(
[if_expr_type, else_expr_type])
if result is None:
raise ql_errors.EdgeQLError(
'if/else clauses must be of related types, got: {}/{}'.format(
if_expr_type.name, else_expr_type.name),
context=ir.if_expr.context)
return result
@_infer_type.register(irast.TypeRef)
def __infer_typeref(ir, schema):
if ir.subtypes:
coll = s_types.Collection.get_class(ir.maintype)
result = coll.from_subtypes(
[infer_type(t, schema) for t in ir.subtypes])
else:
result = schema.get(ir.maintype)
return result
@_infer_type.register(irast.TypeCast)
def __infer_typecast(ir, schema):
return infer_type(ir.type, schema)
@_infer_type.register(irast.Stmt)
def __infer_stmt(ir, schema):
return infer_type(ir.result, schema)
@_infer_type.register(irast.ExistPred)
def __infer_exist(ir, schema):
bool_t = schema.get('std::bool')
if isinstance(ir.expr, irast.EmptySet) and ir.expr.scls is None:
amend_empty_set_type(ir.expr, bool_t, schema=schema)
return bool_t
@_infer_type.register(irast.SliceIndirection)
def __infer_slice(ir, schema):
return infer_type(ir.expr, schema)
@_infer_type.register(irast.IndexIndirection)
def __infer_index(ir, schema):
node_type = infer_type(ir.expr, schema)
index_type = infer_type(ir.index, schema)
str_t = schema.get('std::str')
int_t = schema.get('std::int64')
result = None
if node_type.issubclass(str_t):
if not index_type.issubclass(int_t):
raise ql_errors.EdgeQLError(
f'cannot index string by {index_type.name}, '
f'{int_t.name} was expected',
context=ir.index.context)
result = str_t
elif isinstance(node_type, s_types.Array):
if not index_type.issubclass(int_t):
raise ql_errors.EdgeQLError(
f'cannot index array by {index_type.name}, '
f'{int_t.name} was expected',
context=ir.index.context)
result = node_type.element_type
return result
@_infer_type.register(irast.Array)
def __infer_array(ir, schema):
if ir.elements:
element_type = _infer_common_type(ir.elements, schema)
if element_type is None:
raise ql_errors.EdgeQLError('could not determine array type',
context=ir.context)
else:
raise ql_errors.EdgeQLError(
'could not determine type of empty array',
context=ir.context)
return s_types.Array(element_type=element_type)
@_infer_type.register(irast.Tuple)
def __infer_struct(ir, schema):
element_types = {el.name: infer_type(el.val, schema) for el in ir.elements}
return s_types.Tuple(element_types=element_types, named=ir.named)
@_infer_type.register(irast.TupleIndirection)
def __infer_struct_indirection(ir, schema):
struct_type = infer_type(ir.expr, schema)
result = struct_type.element_types.get(ir.name)
if result is None:
raise ql_errors.EdgeQLError('could not determine struct element type',
context=ir.context)
return result
def infer_type(ir, schema):
try:
return ir._inferred_type_
except AttributeError:
pass
result = _infer_type(ir, schema)
if (result is not None and
not isinstance(result, (s_obj.Object, s_obj.ObjectMeta))):
raise ql_errors.EdgeQLError(
            f'infer_type({ir!r}) returned {result!r} instead of an Object',
context=ir.context)
if result is None or result.name == 'std::any':
raise ql_errors.EdgeQLError('could not determine expression type',
context=ir.context)
ir._inferred_type_ = result
return result
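# Illustrative note (not used by the compiler): the module combines two
# patterns -- @functools.singledispatch picks an inference rule by the IR
# node's class (the __infer_* handlers above), and infer_type() caches the
# result on the node itself via the _inferred_type_ attribute, so each IR node
# is inferred at most once however many times it is referenced.  A toy sketch
# of the same shape, with a hypothetical node class:
#
#   @functools.singledispatch
#   def _toy_infer(node):
#       raise ValueError(f'no inference rule for {node!r}')
#
#   @_toy_infer.register(MyNode)   # MyNode is hypothetical
#   def __toy_infer_mynode(node):
#       return 'std::str'
#
#   def toy_infer(node):
#       try:
#           return node._inferred_type_
#       except AttributeError:
#           node._inferred_type_ = _toy_infer(node)
#           return node._inferred_type_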
| 30.609481 | 79 | 0.652212 | [
"Apache-2.0"
] | mcaramma/edgedb | edb/lang/ir/inference/types.py | 13,560 | Python |
# Date: May 2018
# Author: Joe English, PDST
# eMail: [email protected]
# Name: Guessing Game v7
# Purpose: A program to demonstrate data validation
# Description: This is exactly the same as version 6, except that the input is validated
# Guess Game v7 - while - go again? - data validation
import random
number = random.randint(1, 10)
# Initialise the loop guard variable
keepGoing = True
# Loop as long as keepGoing is True
while keepGoing:
guess = input("Enter a number between 1 and 10: ")
# Validate. Make sure the value entered is numeric
while not guess.isdigit():
guess = input("Enter a number between 1 and 10: ")
# Convert the string to an integer
guess = int(guess)
if guess == number:
print("Correct")
goAgain = input("Play again? (Y/N): ")
if goAgain.upper() == "N":
keepGoing = False
else:
# Get a new number
number = random.randint(1, 10)
elif guess < number:
print("Too low")
else:
print("Too high")
print("Goodbye")
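# A small reusable helper, shown for illustration only (the loop above inlines
# the same idea): keep re-prompting until the reply is numeric, then convert it.
def read_number(prompt):
    reply = input(prompt)
    # isdigit() is False for empty strings, letters and negative values,
    # so the loop repeats until a plain whole number is typed in.
    while not reply.isdigit():
        reply = input(prompt)
    return int(reply)
# Example (hypothetical): guess = read_number("Enter a number between 1 and 10: ")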
| 25.227273 | 81 | 0.609009 | [
"CC0-1.0"
] | gitjot/python-for-lccs | Section 5 - Programming Logic/Guess game v7 - while - data validation.py | 1,110 | Python |
from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
super(sciPENN_Model, self).__init__()
h_size, drop_rate = 512, 0.25
self.RNNCell = RNNCell(h_size, h_size)
self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
self.skip_1 = FF_Block(h_size, drop_rate)
self.skip_2 = FF_Block(h_size, drop_rate)
self.skip_3 = FF_Block(h_size, drop_rate)
MSE_output = Linear(h_size, p_mod2)
if len(quantiles) > 0:
quantile_layer = []
quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
quantile_layer = Sequential(*quantile_layer)
self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
else:
self.mod2_out = MSE_output
if categories is not None:
self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
self.forward = self.forward_transfer
self.categories_arr = empty((len(categories), ), dtype = 'object')
for cat in categories:
self.categories_arr[categories[cat]] = cat
else:
self.forward = self.forward_simple
self.categories_arr = None
self.quantiles = quantiles
self.loss1, self.loss2 = loss1, loss2
def forward_transfer(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
def forward_simple(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
def train_backprop(self, train_loader, val_loader,
n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
optimizer = Adam(self.parameters(), lr = lr)
scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
patience = 0
bestloss = inf
if self.categories_arr is None:
get_correct = lambda x: 0
else:
get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
for epoch in range(n_epoch):
with no_grad():
running_loss, rtype_acc = 0., 0.
self.eval()
for batch, inputs in enumerate(val_loader):
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
n_correct = get_correct(outputs)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
rtype_acc += n_correct
running_loss += mod2_loss.item() * len(mod2)
if self.categories_arr is None:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
else:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
patience += 1
if bestloss/1.005 > running_loss:
bestloss, patience = running_loss, 0
if (patience + 1) % decay_max == 0:
scheduler.step()
                print(f"Decaying learning rate to {optimizer.param_groups[0]['lr']}")
if (patience + 1) > ES_max:
break
self.train()
for batch, inputs in enumerate(train_loader):
optimizer.zero_grad()
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
mod1_loss = self.loss1(outputs['celltypes'], celltypes)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
loss = mod1_loss + mod2_loss
loss.backward()
optimizer.step()
def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
imputed_test = proteins.copy()
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
return imputed_test
def embed(self, impute_loader, test_loader, cells_train, cells_test):
if cells_test is not None:
embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
embedding.obs = concat((cells_train, cells_test), join = 'inner')
else:
embedding = AnnData(zeros(shape = (len(cells_train), 512)))
embedding.obs = cells_train
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
if cells_test is not None:
for mod1 in test_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
return embedding
def fill_predicted(self, array, predicted, bools):
bools = bools.cpu().numpy()
return (1. - bools) * predicted.cpu().numpy() + array
def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
imputed_test.obs = cells
imputed_test.var.index = proteins.var.index
if self.categories_arr is not None:
celltypes = ['None'] * len(cells)
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1 in test_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if self.categories_arr is not None:
predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
celltypes[start:end] = self.categories_arr[predicted_types].tolist()
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = mod2_impute.cpu().numpy()
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
if self.categories_arr is not None:
imputed_test.obs['transfered cell labels'] = celltypes
return imputed_test | 37.434263 | 151 | 0.540975 | [
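# Standalone sketch (illustration only) of the rule used by fill_predicted()
# above: `bools` marks which protein entries were actually measured (1) versus
# missing (0), so measured values pass through unchanged and the network
# prediction only fills the gaps (assuming missing entries are stored as zero
# in `measured`).
def _fill_predicted_demo(measured, predicted, bools):
    measured, predicted, bools = asarray(measured), asarray(predicted), asarray(bools)
    return (1. - bools) * predicted + measured
# e.g. _fill_predicted_demo([1., 0.], [9., 7.], [1., 0.]) -> array([1., 7.])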
"MIT"
] | jlakkis/sciPENN | src/sciPENN/Network/Model.py | 9,396 | Python |
import os
import sys
import time
import datetime
import selenium
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from compare_images import *
if __name__ == "__main__":
# Create a Firefox window driver.
browser = webdriver.Firefox()
browser.set_window_size(400, 400)
# Load the vtkweb application page.
url = "http://localhost:8000/testing/drawCountries.html"
browser.get(url)
# Give the page some time to update the image.
time.sleep(1)
# Take a screenshot.
shot = "drawCountries-%s.png" % (datetime.datetime.now())
browser.save_screenshot(shot)
# Compare the screenshot with the baseline, and report to stdout.
baseline_dir = os.environ['VGL_BASELINE_DIR']
print check_result_image(shot, os.path.join(baseline_dir, "baseline-drawCountries.png"), 20)
# Close the browser window.
browser.quit()
| 27.914286 | 96 | 0.738997 | [
"Apache-2.0"
] | OpenGeoscience/vgl | src/testing/drawCountriesReg.py | 977 | Python |
import json
import hashlib
from tornado import httpclient as hc
from tornado import gen
from graphite_beacon.handlers import LOGGER, AbstractHandler
class PagerdutyHandler(AbstractHandler):
name = 'pagerduty'
# Default options
defaults = {
'subdomain': None,
'apitoken': None,
'service_key': None
}
def init_handler(self):
self.subdomain = self.options.get('subdomain')
assert self.subdomain, 'subdomain is not defined'
self.apitoken = self.options.get('apitoken')
assert self.apitoken, 'apitoken is not defined'
self.service_key = self.options.get('service_key')
assert self.service_key, 'service_key is not defined'
self.client = hc.AsyncHTTPClient()
@gen.coroutine
def notify(self, level, alert, value, target=None, ntype=None, rule=None):
LOGGER.debug("Handler (%s) %s", self.name, level)
message = self.get_short(level, alert, value, target=target, ntype=ntype, rule=rule)
LOGGER.debug('message1:%s', message)
if level == 'normal':
event_type = 'resolve'
else:
event_type = 'trigger'
headers = {
"Content-type": "application/json",
}
client_url = None
if target:
client_url = alert.get_graph_url(target)
m = hashlib.md5()
incident_key_str = "alert={},client_url={}".format(alert.name, client_url)
m.update(incident_key_str)
incident_key = m.hexdigest()
data = {
"service_key": self.service_key,
"event_type": event_type,
"description": message,
"details": message,
"incident_key": incident_key,
"client": 'graphite-beacon',
"client_url": client_url
}
yield self.client.fetch(
"https://events.pagerduty.com/generic/2010-04-15/create_event.json",
body=json.dumps(data),
headers=headers,
method='POST'
)
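    # Note on the incident key above (illustrative summary): it is the hex MD5
    # of "alert=<alert name>,client_url=<graph url>", so repeated notifications
    # for the same alert and target collapse into a single PagerDuty incident,
    # and the later 'resolve' event (sent when level == 'normal') matches the
    # original 'trigger' event for that incident.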
| 30.462687 | 92 | 0.598726 | [
"MIT"
] | z1nkum/graphite-beacon | graphite_beacon/handlers/pagerduty.py | 2,041 | Python |
from uuid import uuid4
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.validators import EmailValidator
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, name, email, password=None):
if not email:
raise ValueError('O email é obrigatório.')
email = self.normalize_email(email)
user = self.model(name=name, email=email)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, name, email, password):
user = self.create_user(name, email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
public_id = models.UUIDField(default=uuid4, editable=False)
name = models.CharField('Nome', max_length=255)
email = models.EmailField('Email', max_length=255,
unique=True, validators=[EmailValidator(message='Email inválido.'), ])
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name', ]
def get_full_name(self):
return self.name
    def get_short_name(self):
return self.name
def __str__(self):
return self.name
| 28.730769 | 100 | 0.657965 | [
"MIT"
] | MaryzangelaBessa/ElRoyale | users/models.py | 1,497 | Python |
import argparse
import os
import numpy as np
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
import struct
import cv2
import time
from pathlib import Path
#np.set_printoptions(threshold=np.nan)
np.set_printoptions(threshold=30)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
argparser = argparse.ArgumentParser(
description='test yolov3 network with coco weights')
argparser.add_argument(
'-w',
'--weights',
help='path to weights file')
argparser.add_argument(
'-v',
'--video',
help='path to video file')
class WeightReader:
def __init__(self, weight_file):
with open(weight_file, 'rb') as w_f:
major, = struct.unpack('i', w_f.read(4))
minor, = struct.unpack('i', w_f.read(4))
revision, = struct.unpack('i', w_f.read(4))
if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
w_f.read(8)
else:
w_f.read(4)
transpose = (major > 1000) or (minor > 1000)
binary = w_f.read()
self.offset = 0
self.all_weights = np.frombuffer(binary, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset-size:self.offset]
def load_weights(self, model):
for i in range(106):
try:
conv_layer = model.get_layer('conv_' + str(i))
print("loading weights of convolution #" + str(i))
if i not in [81, 93, 105]:
norm_layer = model.get_layer('bnorm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = self.read_bytes(size) # bias
gamma = self.read_bytes(size) # scale
mean = self.read_bytes(size) # mean
var = self.read_bytes(size) # variance
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = self.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel, bias])
else:
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel])
except ValueError:
print("no convolution #" + str(i))
def reset(self):
self.offset = 0
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefers left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefers left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
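# Worked example for bbox_iou (illustration only): BoundBox(0, 0, 10, 10) and
# BoundBox(5, 5, 15, 15) overlap in a 5x5 region, so the intersection is 25,
# the union is 100 + 100 - 25 = 175, and the IoU is 25/175 ~= 0.143.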
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
def preprocess_input(image, net_h, net_w):
#new_h, new_w, _ = image.shape
new_h = 480
new_w = 640
# determine the new size of the image
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)/new_w
new_w = net_w
else:
new_w = (new_w * net_h)/new_h
new_h = net_h
# resize the image to the new size
resized = cv2.resize(image[:,:,::-1]/255., (int(new_w), int(new_h)))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[int((net_h-new_h)//2):int((net_h+new_h)//2), int((net_w-new_w)//2):int((net_w+new_w)//2), :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
def decode_netout(netout, anchors, obj_thresh, nms_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
#objectness = netout[..., :4]
if(objectness.all() <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
#box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, None, classes)
boxes.append(box)
return boxes
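# Worked example of the decoding above (illustration only, values made up):
# for a 13x13 grid (416x416 net), cell row=6, col=6, anchor (116, 90), and raw
# outputs tx = ty = tw = th = 0:
#   x = (6 + sigmoid(0)) / 13 = 0.5          # centre at half the image width
#   w = 116 * exp(0) / 416 ~= 0.279          # width as a fraction of net_w
# i.e. a box centred on the image whose width is roughly 28% of the input size.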
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
new_h = net_w
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def draw_boxes(image, boxes, labels, obj_thresh):
#highest_conf_label = ''
#highest_conf = 0
for box in boxes:
label_str = ''
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
label_str += labels[i]
label = i
print(labels[i] + ': ' + str(box.classes[i]*100) + '%')
#if box.classes[i] > highest_conf:
# highest_conf = box.classes[i]
# highest_conf_label = labels[i]
if label >= 0:
cv2.rectangle(image, (box.xmin,box.ymin), (box.xmax,box.ymax), (0,255,0), 3)
#print(type(box.get_score()))
#print(np.format_float_positional(box.get_score(), precision=2))
cv2.putText(image,
label_str + ' ' + str(np.format_float_positional(box.get_score(), precision=2)),
(box.xmin, box.ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
(0,255,0), 2)
return image
def _main_(args):
weights_path = args.weights
video_path = args.video
# set some parameters
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.65, 0.45
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# make the yolov3 model to predict 80 classes on COCO
yolov3 = make_yolov3_model()
# load the weights trained on COCO into the model
weight_reader = WeightReader(weights_path)
weight_reader.load_weights(yolov3)
'''
# set webcam
cap = cv2.VideoCapture(1)
while(True):
ret, image = cap.read()
#image_h, image_w, _ = image.shape
image_w = cap.get(3)
image_h = cap.get(4)
if cv2.waitKey(1) & 0xFF == ord(' '):
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
do_nms(boxes, nms_thresh)
draw_boxes_play_music(image, boxes, labels, obj_thresh)
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
'''
# preprocess the video
cap = cv2.VideoCapture(video_path)
print("open video file from", video_path)
if Path(video_path).is_file():
print("Video file exists")
else:
print("cannot find video file")
print(cap.isOpened())
while(cap.isOpened()):
ret, image = cap.read()
image_w = cap.get(3)
image_h = cap.get(4)
image = cv2.flip(image, 0)
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# draw bounding boxes on the image using labels
draw_boxes(image, boxes, labels, obj_thresh)
# write the image with bounding boxes to video
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
args = argparser.parse_args()
_main_(args)
| 42.228216 | 136 | 0.532131 | [
"MIT"
] | BG4WCE/keras-yolo3 | yolo3_video.py | 20,354 | Python |
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Activation generator helper classes for TCAV"""
from abc import ABCMeta
from abc import abstractmethod
from multiprocessing import dummy as multiprocessing
import os.path
import numpy as np
import PIL.Image
import tensorflow as tf
class ActivationGeneratorInterface(object):
"""Interface for an activation generator for a model"""
__metaclass__ = ABCMeta
@abstractmethod
def process_and_load_activations(self, bottleneck_names, concepts):
pass
@abstractmethod
  def get_model(self):
pass
class ActivationGeneratorBase(ActivationGeneratorInterface):
"""Basic abstract activation generator for a model"""
def __init__(self, model, acts_dir, max_examples=500):
self.model = model
self.acts_dir = acts_dir
self.max_examples = max_examples
def get_model(self):
return self.model
@abstractmethod
def get_examples_for_concept(self, concept):
pass
def get_activations_for_concept(self, concept, bottleneck):
examples = self.get_examples_for_concept(concept)
return self.get_activations_for_examples(examples, bottleneck)
def get_activations_for_examples(self, examples, bottleneck):
acts = self.model.run_examples(examples, bottleneck)
return self.model.reshape_activations(acts).squeeze()
def process_and_load_activations(self, bottleneck_names, concepts):
acts = {}
if self.acts_dir and not tf.gfile.Exists(self.acts_dir):
tf.gfile.MakeDirs(self.acts_dir)
for concept in concepts:
if concept not in acts:
acts[concept] = {}
for bottleneck_name in bottleneck_names:
acts_path = os.path.join(self.acts_dir, 'acts_{}_{}'.format(
concept, bottleneck_name)) if self.acts_dir else None
if acts_path and tf.gfile.Exists(acts_path):
with tf.gfile.Open(acts_path, 'rb') as f:
acts[concept][bottleneck_name] = np.load(f).squeeze()
tf.logging.info('Loaded {} shape {}'.format(
acts_path, acts[concept][bottleneck_name].shape))
else:
acts[concept][bottleneck_name] = self.get_activations_for_concept(
concept, bottleneck_name)
if acts_path:
          tf.logging.info('{} does not exist, making one...'.format(
acts_path))
with tf.gfile.Open(acts_path, 'w') as f:
np.save(f, acts[concept][bottleneck_name], allow_pickle=False)
return acts
class ImageActivationGenerator(ActivationGeneratorBase):
"""Activation generator for a basic image model"""
def __init__(self, model, source_dir, acts_dir, max_examples=10):
self.source_dir = source_dir
super(ImageActivationGenerator, self).__init__(
model, acts_dir, max_examples)
def get_examples_for_concept(self, concept):
concept_dir = os.path.join(self.source_dir, concept)
img_paths = [os.path.join(concept_dir, d)
for d in tf.gfile.ListDirectory(concept_dir)]
imgs = self.load_images_from_files(img_paths, self.max_examples,
shape=self.model.get_image_shape()[:2])
return imgs
  def load_image_from_file(self, filename, shape):
    """Given a filename, try to open the file. If that fails, return None.
Args:
filename: location of the image file
      shape: the target size the image is resized to
    Returns:
      the loaded image as an RGB array with values scaled to [0, 1], or None
      if the file cannot be read or does not decode to a 3-channel image.
"""
if not tf.gfile.Exists(filename):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
# ensure image has no transparency channel
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).convert(
'RGB').resize(shape, PIL.Image.BILINEAR))
# Normalize pixel values to between 0 and 1.
img = np.float32(img) / 255.0
if not (len(img.shape) == 3 and img.shape[2] == 3):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
return img
def load_images_from_files(self, filenames, max_imgs=500,
do_shuffle=True, run_parallel=True,
shape=(299, 299),
num_workers=100):
"""Return image arrays from filenames.
Args:
filenames: locations of image files.
max_imgs: maximum number of images from filenames.
do_shuffle: before getting max_imgs files, shuffle the names or not
run_parallel: get images in parallel or not
shape: desired shape of the image
num_workers: number of workers in parallelization.
Returns:
image arrays
"""
imgs = []
# First shuffle a copy of the filenames.
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
imgs = pool.map(
lambda filename: self.load_image_from_file(filename, shape),
filenames[:max_imgs])
imgs = [img for img in imgs if img is not None]
if len(imgs) <= 1:
raise ValueError('You must have more than 1 image in each class to run TCAV.')
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if img is not None:
imgs.append(img)
if len(imgs) <= 1:
raise ValueError('You must have more than 1 image in each class to run TCAV.')
elif len(imgs) >= max_imgs:
break
return np.array(imgs)
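# Hypothetical usage sketch (illustration only; `my_model` stands for any model
# wrapper that provides run_examples(), reshape_activations() and
# get_image_shape(), and 'mixed4c' is just an example bottleneck name):
#
#   act_gen = ImageActivationGenerator(my_model, '/path/to/concepts',
#                                      '/tmp/acts', max_examples=10)
#   acts = act_gen.process_and_load_activations(['mixed4c'], ['striped', 'random'])
#   # acts['striped']['mixed4c'] holds the activations, also cached under /tmp/acts.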
| 33.494505 | 88 | 0.674869 | [
"Apache-2.0"
] | Gareth001/tcav | activation_generator.py | 6,096 | Python |
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from rest_framework.schemas import get_schema_view
VERSION = 'V1.0.0'
urlpatterns = [
path('admin/', admin.site.urls),
url('api/{}/user/'.format(VERSION),include('app.user.urls',namespace='user')),
url('api/{}/core/'.format(VERSION),include('app.core.urls',namespace='core')),
]
| 30.538462 | 82 | 0.715365 | [
"MIT"
] | JeremyAndress/API-User-Template | src/config/urls.py | 397 | Python |
"""
========================
Obstacle Avoidance in 2D
========================
Plots a 2D DMP that goes through a point obstacle when there is no coupling
term for obstacle avoidance and a 2D DMP that avoids the point obstacle with
a coupling term.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D
execution_time = 1.0
start_y = np.zeros(2)
goal_y = np.ones(2)
dmp = DMP(n_dims=2, execution_time=execution_time, n_weights_per_dim=3)
dmp.configure(start_y=start_y, goal_y=goal_y)
dmp.set_weights(np.array([-50.0, 100.0, 300.0, -200.0, -200.0, -200.0]))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("x")
ax.set_ylabel("y")
obstacle_position = np.array([0.92, 0.5])
T, Y = dmp.open_loop(run_t=execution_time)
ax.plot(Y[:, 0], Y[:, 1], label="Original")
coupling_term = CouplingTermObstacleAvoidance2D(obstacle_position)
T, Y = dmp.open_loop(run_t=execution_time, coupling_term=coupling_term)
ax.plot(Y[:, 0], Y[:, 1], label="Obstacle avoidance")
ax.scatter(start_y[0], start_y[1], c="r", label="Start")
ax.scatter(goal_y[0], goal_y[1], c="g", label="Goal")
ax.scatter(obstacle_position[0], obstacle_position[1], c="y", label="Obstacle")
ax.legend()
plt.tight_layout()
plt.show()
| 30.738095 | 79 | 0.711077 | [
"BSD-3-Clause"
] | DavidYaonanZhu/movement_primitives | examples/plot_obstacle_avoidance_2d.py | 1,291 | Python |
#!/usr/bin/env python
#
# Copyright (c) 2018 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Define the classes required to fully cover k8s."""
import logging
import os
import unittest
from security_tests import SecurityTesting
class SecurityTests(unittest.TestCase):
# pylint: disable=missing-docstring
def setUp(self):
os.environ["DEPLOY_SCENARIO"] = "k8-test"
os.environ["KUBE_MASTER_IP"] = "127.0.0.1"
os.environ["KUBE_MASTER_URL"] = "https://127.0.0.1:6443"
os.environ["KUBERNETES_PROVIDER"] = "local"
self.security_stesting = SecurityTesting.SecurityTesting()
def test_run_kubetest_cmd_none(self):
with self.assertRaises(TypeError):
self.security_stesting.run_security()
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
| 25.804878 | 71 | 0.713611 | [
"Apache-2.0"
] | onap/integration-xtesting | security/onap_security/test_security_test.py | 1,058 | Python |
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import logging
import time
from pprint import pformat
from ActionQueue import ActionQueue
import AmbariConfig
import hostname
from HostInfo import HostInfo
logger = logging.getLogger()
firstContact = True
class Heartbeat:
def __init__(self, actionQueue, config=None):
self.actionQueue = actionQueue
self.config = config
self.reports = []
def build(self, id='-1', state_interval=-1, componentsMapped=False):
global clusterId, clusterDefinitionRevision, firstContact
timestamp = int(time.time()*1000)
queueResult = self.actionQueue.result()
nodeStatus = { "status" : "HEALTHY",
"cause" : "NONE"}
heartbeat = { 'responseId' : int(id),
'timestamp' : timestamp,
'hostname' : hostname.hostname(),
'nodeStatus' : nodeStatus
}
commandsInProgress = False
if self.actionQueue.commandQueue.empty() == False:
commandsInProgress = True
if len(queueResult) != 0:
heartbeat['reports'] = queueResult['reports']
heartbeat['componentStatus'] = queueResult['componentStatus']
if len(heartbeat['reports']) > 0:
# There may be IN_PROGRESS tasks
commandsInProgress = True
pass
logger.info("Sending heartbeat with response id: " + str(id) + " and "
"timestamp: " + str(timestamp) +
". Command(s) in progress: " + repr(commandsInProgress) +
". Components mapped: " + repr(componentsMapped))
logger.debug("Heartbeat : " + pformat(heartbeat))
if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
hostInfo = HostInfo(self.config)
nodeInfo = { }
# for now, just do the same work as registration
# this must be the last step before returning heartbeat
hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
heartbeat['agentEnv'] = nodeInfo
logger.debug("agentEnv : " + str(nodeInfo))
return heartbeat
def main(argv=None):
actionQueue = ActionQueue(AmbariConfig.config)
heartbeat = Heartbeat(actionQueue)
print json.dumps(heartbeat.build('3',3))
if __name__ == '__main__':
main()
| 33.395604 | 81 | 0.676867 | [
"Apache-2.0"
] | zhanganha/ambari | ambari-agent/src/main/python/ambari_agent/Heartbeat.py | 3,039 | Python |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
FIREWALL = "firewall"
class Output:
ALLOWED = "allowed"
CREATIONTIMESTAMP = "creationTimestamp"
DESCRIPTION = "description"
ID = "id"
KIND = "kind"
NAME = "name"
NETWORK = "network"
SELFLINK = "selfLink"
SOURCERANGES = "sourceRanges"
SOURCETAGS = "sourceTags"
TARGETTAGS = "targetTags"
class GetFirewallInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"firewall": {
"type": "string",
"title": "Firewall Name",
"description": "Name of the firewall rule to return",
"order": 1
}
},
"required": [
"firewall"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetFirewallOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"allowed": {
"type": "array",
"title": "Allowed",
"description": "The list of allow rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection",
"items": {
"$ref": "#/definitions/allowed"
},
"order": 10
},
"creationTimestamp": {
"type": "string",
"title": "Creation Timestamp",
"description": "Creation timestamp",
"order": 11
},
"description": {
"type": "string",
"title": "Description",
"description": "A textual description of the operation, which is set when the operation is created",
"order": 5
},
"id": {
"type": "string",
"title": "ID",
"description": "The unique identifier for the resource. This identifier is defined by the server",
"order": 1
},
"kind": {
"type": "string",
"title": "Kind",
"description": "Type of the resource. Always compute#firewall for firewall rules",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the resource, provided by the client when the resource is created",
"order": 3
},
"network": {
"type": "string",
"title": "Network",
"description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default",
"order": 6
},
"selfLink": {
"type": "string",
"title": "Self Link",
"description": "Server-defined url for the resource",
"order": 4
},
"sourceRanges": {
"type": "array",
"title": "Source Ranges",
"description": "If source ranges are specified, the firewall will apply only to traffic that has source ip address in these ranges",
"items": {
"type": "string"
},
"order": 8
},
"sourceTags": {
"type": "array",
"title": "Source Tags",
"description": "If source tags are specified, the firewall will apply only to traffic with source ip that belongs to a tag listed in source tags",
"items": {
"type": "string"
},
"order": 7
},
"targetTags": {
"type": "array",
"title": "Target Tags",
"description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]",
"items": {
"type": "string"
},
"order": 9
}
},
"definitions": {
"allowed": {
"type": "object",
"title": "allowed",
"properties": {
"IPProtocol": {
"type": "string",
"title": "IPProtocol",
"order": 1
},
"ports": {
"type": "array",
"title": "Ports",
"items": {
"type": "string"
},
"order": 2
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 25.613924 | 176 | 0.551767 | [
"MIT"
] | TonyHamil/insightconnect-plugins | google_cloud_compute/komand_google_cloud_compute/actions/get_firewall/schema.py | 4,047 | Python |
"""This module contains the general information for EquipmentHealthLed ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentHealthLedConsts:
COLOR_AMBER = "amber"
COLOR_BLUE = "blue"
COLOR_GREEN = "green"
COLOR_RED = "red"
COLOR_UNKNOWN = "unknown"
HEALTH_LED_STATE_CRITICAL = "critical"
HEALTH_LED_STATE_MINOR = "minor"
HEALTH_LED_STATE_NORMAL = "normal"
OPER_STATE_BLINKING = "blinking"
OPER_STATE_ETH = "eth"
OPER_STATE_FC = "fc"
OPER_STATE_OFF = "off"
OPER_STATE_ON = "on"
OPER_STATE_UNKNOWN = "unknown"
OPER_STATE_UNSUPPORTED = "unsupported"
class EquipmentHealthLed(ManagedObject):
"""This is EquipmentHealthLed class."""
consts = EquipmentHealthLedConsts()
naming_props = set([])
mo_meta = MoMeta("EquipmentHealthLed", "equipmentHealthLed", "health-led", VersionMeta.Version212a, "InputOutput", 0x7f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], [u'computeBlade', u'computeExtBoard', u'computeRackUnit', u'computeServerUnit', u'equipmentChassis', u'equipmentFanModule', u'equipmentFex', u'equipmentIOCard', u'equipmentPsu'], [u'computeHealthLedSensorAlarm', u'faultInst'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version212a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"color": MoPropertyMeta("color", "color", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["amber", "blue", "green", "red", "unknown"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"health_led_state": MoPropertyMeta("health_led_state", "healthLedState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["critical", "minor", "normal"], []),
"health_led_state_qualifier": MoPropertyMeta("health_led_state_qualifier", "healthLedStateQualifier", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["blinking", "eth", "fc", "off", "on", "unknown", "unsupported"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"color": "color",
"dn": "dn",
"healthLedState": "health_led_state",
"healthLedStateQualifier": "health_led_state_qualifier",
"id": "id",
"name": "name",
"operState": "oper_state",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.color = None
self.health_led_state = None
self.health_led_state_qualifier = None
self.id = None
self.name = None
self.oper_state = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "EquipmentHealthLed", parent_mo_or_dn, **kwargs)
| 56.266667 | 422 | 0.662322 | [
"Apache-2.0"
] | Curlyfingers/ucsmsdk | ucsmsdk/mometa/equipment/EquipmentHealthLed.py | 4,220 | Python |
from django import forms
class PostForm(forms.Form):
image = forms.ImageField()
image_name = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control","placeholder": "Image Name"}))
image_caption = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control","placeholder": "Image Caption"}))
class CommentForm(forms.Form):
body = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control","placeholder": "Leave a comment!"}))
| 47.5 | 122 | 0.726316 | [
"MIT"
] | Irene-nandy/Instagram | instagramHome/froms.py | 475 | Python |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses Modin configs originated from environment variables."""
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
"""Base class for environment variables-based configuration."""
varname: str = None
@classmethod
def _get_raw_from_config(cls) -> str:
"""
Read the value from environment variable.
Returns
-------
str
Config raw value.
Raises
------
KeyError
If value is absent.
"""
return os.environ[cls.varname]
@classmethod
def get_help(cls) -> str:
"""
Generate user-presentable help for the config.
Returns
-------
str
"""
help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
if cls.choices:
help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
return help
class IsDebug(EnvironmentVariable, type=bool):
"""Force Modin engine to be "Python" unless specified by $MODIN_ENGINE."""
varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
"""Distribution engine to run queries by."""
varname = "MODIN_ENGINE"
choices = ("Ray", "Dask", "Python", "Native")
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
str
"""
if IsDebug.get():
return "Python"
try:
import ray
except ImportError:
pass
else:
if version.parse(ray.__version__) < version.parse("1.4.0"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
return "Ray"
try:
import dask
import distributed
except ImportError:
pass
else:
if version.parse(dask.__version__) < version.parse(
"2.22.0"
) or version.parse(distributed.__version__) < version.parse("2.22.0"):
raise ImportError(
"Please `pip install modin[dask]` to install compatible Dask version."
)
return "Dask"
try:
import omniscidbe # noqa
except ImportError:
try:
import dbe # noqa
except ImportError:
pass
else:
return "Native"
else:
return "Native"
raise ImportError(
"Please refer to installation documentation page to install an engine"
)
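    # Summary of the fallback order implemented by _get_default above:
    # "Python" when MODIN_DEBUG is set, otherwise "Ray" (requires ray >= 1.4.0),
    # then "Dask" (dask and distributed >= 2.22.0), then "Native" when an OmniSci
    # backend (omniscidbe or dbe) is importable; otherwise an ImportError asks the
    # user to install an engine.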
class Backend(EnvironmentVariable, type=str):
"""Engine to run on a single node of distribution."""
varname = "MODIN_BACKEND"
default = "Pandas"
choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
"""Whether to Turn on experimental features."""
varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
"""Whether Modin is running on pre-initialized Ray cluster."""
varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
"""Redis address to connect to when running in Ray cluster."""
varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
"""What password to use for connecting to Redis."""
varname = "MODIN_REDIS_PASSWORD"
default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
"""How many CPU cores to use during initialization of the Modin engine."""
varname = "MODIN_CPUS"
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
import multiprocessing
return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
"""How may GPU devices to utilize across the whole distribution."""
varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
"""
    How much memory (in bytes) to give to an execution engine.
Notes
-----
* In Ray case: the amount of memory to start the Plasma object store with.
* In Dask case: the amount of memory that is given to each worker depending on CPUs used.
"""
varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
"""How many partitions to use for a Modin DataFrame (along each axis)."""
varname = "MODIN_NPARTITIONS"
@classmethod
def _put(cls, value):
"""
Put specific value if NPartitions wasn't set by a user yet.
Parameters
----------
value : int
Config value to set.
Notes
-----
This method is used to set NPartitions from cluster resources internally
and should not be called by a user.
"""
if cls.get_value_source() == ValueSource.DEFAULT:
cls.put(value)
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
if Backend.get() == "Cudf":
return GpuCount.get()
else:
return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
"""SOCKS proxy address if it is needed for SSH to work."""
varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
"""Whether to gather RPyC logs (applicable for remote context)."""
varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
"""Whether to trace RPyC calls (applicable for remote context)."""
varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
"""How big a fragment in OmniSci should be when creating a table (in rows)."""
varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
"""Whether to use Calcite for OmniSci queries execution."""
varname = "MODIN_USE_CALCITE"
default = True
class TestDatasetSize(EnvironmentVariable, type=str):
"""Dataset size for running some tests."""
varname = "MODIN_TEST_DATASET_SIZE"
choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
"""Set to true to start and connect Ray client before a testing session starts."""
varname = "MODIN_TEST_RAY_CLIENT"
default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
"""Whether to track for open file handles leakage during testing."""
varname = "MODIN_TEST_TRACK_FILE_LEAKS"
# Turn off tracking on Windows by default because
# psutil's open_files() can be extremely slow on Windows (up to adding a few hours).
# see https://github.com/giampaolo/psutil/pull/597
default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
"""Allows to select a library that we will use for testing performance."""
varname = "MODIN_ASV_USE_IMPL"
choices = ("modin", "pandas")
default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
"""Allows to override default size of data (shapes)."""
varname = "MODIN_ASV_DATASIZE_CONFIG"
default = None
class ProgressBar(EnvironmentVariable, type=bool):
"""Whether or not to show the progress bar."""
varname = "MODIN_PROGRESS_BAR"
default = False
@classmethod
def enable(cls):
"""Enable ``ProgressBar`` feature."""
cls.put(True)
@classmethod
def disable(cls):
"""Disable ``ProgressBar`` feature."""
cls.put(False)
@classmethod
def put(cls, value):
"""
Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and BenchmarkMode.get():
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
"""Whether or not to perform computations synchronously."""
varname = "MODIN_BENCHMARK_MODE"
default = False
@classmethod
def put(cls, value):
"""
Set ``BenchmarkMode`` value only if progress bar feature is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and ProgressBar.get():
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value)
class PersistentPickle(EnvironmentVariable, type=bool):
"""Wheather serialization should be persistent."""
varname = "MODIN_PERSISTENT_PICKLE"
# When set to off, it allows faster serialization which is only
# valid in current run (i.e. useless for saving to disk).
# When set to on, Modin objects could be saved to disk and loaded
# but serialization/deserialization could take more time.
default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
"""
Additional command line options for the OmniSci engine.
Please visit OmniSci documentation for the description of available parameters:
https://docs.omnisci.com/installation-and-configuration/config-parameters#configuration-parameters-for-omniscidb
"""
varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
default = {
"enable_union": 1,
"enable_columnar_output": 1,
"enable_lazy_fetch": 0,
"null_div_by_zero": 1,
"enable_watchdog": 0,
}
@classmethod
    def get(cls):
        """
        Get the resulting command-line options.
        Decode and merge specified command-line options with the default one.
        Returns
        -------
        dict
            Decoded and verified config value.
        """
        custom_parameters = super().get()
        result = cls.default.copy()
result.update(
{key.replace("-", "_"): value for key, value in custom_parameters.items()}
)
return result
def _check_vars():
"""
Check validity of environment variables.
Look out for any environment variables that start with "MODIN_" prefix
that are unknown - they might be a typo, so warn a user.
"""
valid_names = {
obj.varname
for obj in globals().values()
if isinstance(obj, type)
and issubclass(obj, EnvironmentVariable)
and not obj.is_abstract
}
found_names = {name for name in os.environ if name.startswith("MODIN_")}
unknown = found_names - valid_names
if unknown:
warnings.warn(
f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
+ ", ".join(sorted(unknown))
)
_check_vars()
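# Illustrative sketch (not part of the original module): demonstrates the mutual
# exclusion enforced by ProgressBar.put and BenchmarkMode.put above, assuming the
# base Parameter.get/put behave as their names suggest. Guarded so it only runs
# when this file is executed directly.
if __name__ == "__main__":
    BenchmarkMode.put(True)
    try:
        ProgressBar.enable()
    except ValueError as exc:
        # Expected: "ProgressBar isn't compatible with BenchmarkMode"
        print(exc)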
| 27.837963 | 116 | 0.633378 | [
"ECL-2.0",
"Apache-2.0"
] | atomicai/modin | modin/config/envvars.py | 12,026 | Python |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
class MonitoringClient(NamespacedClient):
@query_params("interval", "system_api_version", "system_id")
def bulk(self, body, doc_type=None, params=None, headers=None):
"""
Used by the monitoring features to send monitoring data.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg doc_type: Default document type for items which don't
provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of
the payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request(
"POST",
_make_path("_monitoring", doc_type, "bulk"),
params=params,
headers=headers,
body=body,
)
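# Illustrative usage sketch (not part of the original module). The client wiring and
# the index/system names below are assumptions; the payload follows the newline
# separated action/data pair format described in the docstring above.
if __name__ == "__main__":
    import json
    monitoring_body = [
        {"index": {"_index": "monitoring-sample"}},
        {"node_id": "node-1", "heap_used_percent": 42},
    ]
    # `_bulk_body` would serialize the list into the newline-delimited form:
    print("\n".join(json.dumps(item) for item in monitoring_body))
    # With a connected client `es`, the call would look like:
    # es.monitoring.bulk(body=monitoring_body, system_id="kibana",
    #                    system_api_version="7", interval="10s")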
| 40.888889 | 108 | 0.685688 | [
"MIT"
] | PatrickJD/AWS | AB/lambda/elasticindex/elasticsearch/client/monitoring.py | 2,208 | Python |
# Simple script for updating the build number in pubspec.yaml
import re
# Regex patter to be used to ID the correct line in pubspec.yaml
version_line_pattern = "version:\s+\d+\.\d+\.\d+\+\d+"
# Open pubspec.yaml and read lines into memory
with open("pubspec.yaml", "r") as current_pubspec:
contents = current_pubspec.readlines()
# Reopen pubspec.yaml for writing and update
with open("pubspec.yaml", "w") as updated_pubspec:
# Find and bump build number
counter = 0
for line in contents:
if re.match(pattern=version_line_pattern, string=line):
line_array = line.split("+")
contents[counter] = line_array[0] + "+" + str(int(line_array[1]) + 1) + "\n"
break
counter += 1
# Write updated contents back to disk
updated_pubspec.writelines(contents)
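# Minimal sketch of the same bump applied to an in-memory line (illustrative only;
# the version string below is invented and the real update above operates on
# pubspec.yaml). Expected output: "version: 1.2.3+8".
if __name__ == "__main__":
    demo_line = "version: 1.2.3+7"
    if re.match(pattern=version_line_pattern, string=demo_line):
        head, build = demo_line.split("+")
        print(head + "+" + str(int(build) + 1))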
| 33.32 | 88 | 0.662665 | [
"MIT"
] | EpicSalvation/FlutterStep | increment_build.py | 833 | Python |
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
if rmin != 0:
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
else:
x[:] = r * np.cos(t)
y[:] = r * np.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
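    # Worked example (illustrative note): with no axis attached the transform uses
    # rmin = 0, theta_offset = 0 and theta_direction = 1, so the forward map reduces to
    #     x = r * cos(theta),   y = r * sin(theta)
    # e.g. (theta, r) = (pi/2, 2.0) maps to approximately (x, y) = (0.0, 2.0);
    # InvertedPolarTransform below recovers it via r = sqrt(x*x + y*y) and
    # theta = arccos(x / r), reflected to 2*pi - theta when y < 0.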
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
        depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', None)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(0)
self.set_theta_direction(1)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
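    # Example (illustrative only): set_theta_zero_location("N") is equivalent to
    # set_theta_offset(np.pi * 0.5), i.e. theta = 0 points straight up; combined with
    # set_theta_direction(-1) this yields a compass-style plot whose angles increase
    # clockwise from North.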
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). Eg. 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
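    # Example (illustrative only, assuming matplotlib.pyplot is imported as plt):
    #     ax = plt.subplot(111, projection='polar')
    #     lines, labels = ax.set_thetagrids(range(0, 360, 45), frac=1.1)
    # draws eight angular gridlines labelled 0, 45, ..., 315 degrees just outside
    # the axes circle.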
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
    def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
| 35.076216 | 106 | 0.559173 | [
"BSD-3-Clause"
] | ashley8jain/IITD-complaint-system-web | lib/python2.7/matplotlib/projections/polar.py | 26,693 | Python |
import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
from optparse import make_option
from os import path
import django
from django.template import Template, Context
from django.utils import archive
from django.utils._os import rmtree_errorhandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.makemessages import handle_extensions
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
args = "[name] [optional destination directory]"
option_list = BaseCommand.option_list + (
make_option('--template',
action='store', dest='template',
help='The dotted import path to load the template from.'),
make_option('--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'),
make_option('--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. '
                'Separate multiple file names with commas, or use '
'-n multiple times.')
)
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
# Can't perform any active locale changes during this command, because
# setting might not be available at all.
leave_locale_alone = True
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = int(options.get('verbosity'))
self.validate_name(name, app_or_project)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(
handle_extensions(options.get('extensions'), ignored=()))
extra_files = []
for file in options.get('files'):
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
if django.VERSION[-1] == 0:
docs_version = 'dev'
else:
docs_version = '%d.%d' % django.VERSION[:2]
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
'docs_version': docs_version,
}), autoescape=False)
# Setup a stub settings environment for template rendering
from django.conf import settings
if not settings.configured:
settings.configure()
template_dir = self.handle_template(options.get('template'),
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
                # accidentally render Django template files
with open(old_path, 'rb') as template_file:
content = template_file.read()
if filename.endswith(extensions) or filename in extra_files:
content = content.decode('utf-8')
template = Template(content)
content = template.render(context)
content = content.encode('utf-8')
with open(new_path, 'wb') as new_file:
new_file.write(content)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove,
onerror=rmtree_errorhandler)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
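    # Resolution summary: when no template is given, the bundled
    # django/conf/<app|project>_template directory is used; otherwise "file://"
    # prefixes are stripped and local paths expanded, http/https/ftp URLs are
    # downloaded, and archive files are extracted into a temporary directory that
    # is cleaned up once the command finishes.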
def validate_name(self, name, app_or_project):
if name is None:
raise CommandError("you must provide %s %s name" % (
"an" if app_or_project == "app" else "a", app_or_project))
# If it's not a valid directory name.
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
def download(self, url):
"""
Downloads the given URL and returns the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
"""
        Extracts the given file to a temporary directory and returns
the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""
Returns True if the name looks like a URL
"""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
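# Illustrative invocation (assumption: reached through Django's startapp/startproject
# commands, which subclass TemplateCommand), showing the options declared above:
#   django-admin startapp myapp /tmp/dest --template=/path/to/app_template \
#       --extension=py,rst --name=Procfile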
| 40.364742 | 81 | 0.565437 | [
"BSD-3-Clause"
] | LuanP/django | django/core/management/templates.py | 13,280 | Python |
"""
Using the csv module.
"""
import csv
def parse(csvfilename):
"""
Reads CSV file named csvfilename, parses
    its content and returns the data within
the file as a list of lists.
"""
table = []
with open(csvfilename, "r") as csvfile:
csvreader = csv.reader(csvfile,
skipinitialspace=True)
for row in csvreader:
table.append(row)
return table
def print_table(table):
"""
Print out table, which must be a list
of lists, in a nicely formatted way.
"""
for row in table:
# Header column left justified
print("{:<19}".format(row[0]), end='')
# Remaining columns right justified
for col in row[1:]:
print("{:>4}".format(col), end='')
print("", end='\n')
table = parse("hightemp.csv")
print_table(table)
print("")
print("")
table2 = parse("hightemp2.csv")
print_table(table2)
| 21.860465 | 53 | 0.578723 | [
"Apache-2.0"
] | Abu-Kaisar/Courses- | Rice-Python-Data-Analysis/week3/examples3_csvmodule.py | 940 | Python |
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_MNIST_CIFAR as accuracy
def train_epoch(model, optimizer, device, data_loader, epoch=0):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_snorm_e = batch_snorm_e.to(device)
batch_labels = batch_labels.to(device)
batch_snorm_n = batch_snorm_n.to(device) # num x 1
optimizer.zero_grad()
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_loss /= (iter + 1)
epoch_train_acc /= nb_data
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network(model, device, data_loader, epoch=0):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_snorm_e = batch_snorm_e.to(device)
batch_labels = batch_labels.to(device)
batch_snorm_n = batch_snorm_n.to(device)
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= nb_data
    return epoch_test_loss, epoch_test_acc
| 38.775862 | 103 | 0.663406 | [
"MIT"
] | nfkjsfoeif/AutoGCN | train/train_superpixels_graph_classification.py | 2,249 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import logging
import zipfile
import requests
from .record import Record
logger = logging.getLogger(__name__)
# Module API
def collect(conf, conn):
"""Collect ICD-XX-PCS procedures.
"""
# For more information see:
# https://www.cms.gov/Medicare/Coding/ICD10/2016-ICD-10-PCS-and-GEMs.html
URL = 'https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-PCS-Long-Abbrev-Titles.zip'
FILE = 'icd10pcs_order_2016.txt'
VERSION = 'ICD-10-PCS'
LAST_UPDATED = '2015-10-01'
# Prepare file
zip = requests.get(URL).content
file = zipfile.ZipFile(io.BytesIO(zip)).open(FILE)
count = 0
for line in file:
# Prepare data
# Format is described in instruction
# stored in zip archive we download
data = {
'code': line[6:6+7].strip(),
'is_header': line[14:14+1].strip(),
'short_description': line[16:16+60].strip(),
'long_description': line[77:].strip(),
'version': VERSION,
'last_updated': LAST_UPDATED,
}
# Create record
record = Record.create(URL, data)
# Write record
record.write(conf, conn)
# Log info
count += 1
if not count % 100:
logger.info('Collected %s "%s" interventions', count, record.table)
| 27.017857 | 95 | 0.62657 | [
"MIT"
] | almeidaah/collectors | collectors/icdpcs/collector.py | 1,513 | Python |
# encoding: utf-8
import io
from setuptools import find_packages, setup
from har2case import __version__
with io.open("README.rst", encoding='utf-8') as f:
long_description = f.read()
install_requires = open("requirements.txt").readlines()
setup(
name='har2case',
version=__version__,
description='Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.',
long_description=long_description,
author='Leo Lee',
author_email='[email protected]',
url='https://github.com/HttpRunner/har2case',
license='MIT',
packages=find_packages(exclude=['test.*', 'test']),
package_data={},
keywords='har converter yaml json',
install_requires=install_requires,
classifiers=[
"Development Status :: 3 - Alpha",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
entry_points={
'console_scripts': [
'har2case=har2case.cli:main'
]
}
)
| 28.25 | 83 | 0.645133 | [
"MIT"
] | ddkwing/har2case | setup.py | 1,130 | Python |
import requests
import os
class IntegrationDiscordDriver:
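    """Minimal Discord OAuth2 driver.
    ``send`` redirects the user to Discord's authorization page, ``user`` exchanges
    the returned ``code`` for an access token, and ``refresh`` trades a refresh
    token for a new one. Client credentials and the redirect URI are read from the
    DISCORD_CLIENT, DISCORD_SECRET and DISCORD_REDIRECT environment variables.
    """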
_scope = ''
_state = ''
def scopes(self, scopes):
pass
def send(self, request, state='', scopes=('identify',)):
self._scope = scopes
self._state = state
return request.redirect('https://discordapp.com/api/oauth2/authorize?response_type=code&client_id={}&scope={}&state={}&redirect_uri={}'.format(
os.getenv('DISCORD_CLIENT'),
' '.join(self._scope),
self._state,
os.getenv('DISCORD_REDIRECT'),
))
def user(self, request):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'authorization_code',
'code': request.input('code'),
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
        return requests.post('https://discordapp.com/api/oauth2/token', data=data, headers=headers).json()
def refresh(self, refresh_token):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
        return requests.post('https://discordapp.com/api/oauth2/token', data=data, headers=headers).json()
| 31.058824 | 151 | 0.576389 | [
"MIT"
] | josephmancuso/gbaleague-masonite2 | app/integrations/IntegrationDiscordDriver.py | 1,584 | Python |
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='bcpy',
version='0.1',
author='Igor Neves Faustino',
author_email='[email protected]',
url='https://github.com/igornfaustino/bcpy.git',
description='library for BCI signal analysis',
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
packages=find_packages(),
# entry_points={
# 'console_scripts': ['forecastio = displayforecastio.app:run'],
# }
)
| 26.304348 | 72 | 0.682645 | [
"MIT"
] | bneurd/bcpy | setup.py | 605 | Python |
from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"""
Cross-iteration redundancies elimination.
Parameters
----------
    clusters : list of Cluster
        Input Clusters, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
* 'sops' stands for sums-of-products, that is redundancies are searched
across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1]
"""
if mode == 'invariants':
space = ('inv-basic', 'inv-compound')
elif mode in ('sops',):
space = (mode,)
else:
assert False, "Unknown CIRE mode `%s`" % mode
processed = []
for c in clusters:
# We don't care about sparse Clusters. Their computational cost is
# negligible and processing all of them would only increase compilation
# time and potentially make the generated code more chaotic
if not c.is_dense:
processed.append(c)
continue
# Some of the CIRE transformers need to look inside all scopes
# surrounding `c` to perform data dependencies analysis
context = Context(c).process(clusters)
# Applying CIRE may change `c` as well as creating one or more new Clusters
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed
def _cire(cluster, context, space, sregistry, options, platform):
# Construct the space of variants
variants = [modes[mode](sregistry, options).make_schedule(cluster, context)
for mode in space]
if not any(i.schedule for i in variants):
return [cluster]
# Pick the variant with the highest score, that is the variant with the best
# trade-off between operation count reduction and working set size increase
schedule, exprs = pick_best(variants)
# Schedule -> [Clusters]
schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
clusters.append(rebuild(cluster, exprs, subs, schedule))
return clusters
class Cire(object):
"""
Base class for CIRE transformers.
"""
optname = None
mode = None
def __init__(self, sregistry, options):
self.sregistry = sregistry
self._opt_minstorage = options['min-storage']
self._opt_mincost = options['cire-mincost'][self.optname]
self._opt_maxpar = options['cire-maxpar']
self._opt_maxalias = options['cire-maxalias']
def make_schedule(self, cluster, context):
# Capture aliases within `exprs`
aliases = AliasMapper()
score = 0
exprs = cluster.exprs
ispace = cluster.ispace
for n in range(self._nrepeats(cluster)):
# Extract potentially aliasing expressions
mapper = self._extract(exprs, context, n)
# Search aliasing expressions
found = collect(mapper.extracted, ispace, self._opt_minstorage)
# Choose the aliasing expressions with a good flops/memory trade-off
exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
aliases.update(chosen)
score += pscore
# AliasMapper -> Schedule
schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)
        # The actual score is a 2-tuple <flop-reduction-score, working-set-score>
score = (score, len(aliases))
return SpacePoint(schedule, exprs, score)
def _make_symbol(self):
return Symbol(name=self.sregistry.make_name('dummy'))
def _nrepeats(self, cluster):
raise NotImplementedError
def _extract(self, exprs, context, n):
raise NotImplementedError
def _in_writeto(self, dim, cluster):
raise NotImplementedError
def _selector(self, e, naliases):
raise NotImplementedError
class CireInvariants(Cire):
optname = 'invariants'
def _nrepeats(self, cluster):
return 1
def _rule(self, e):
return (e.is_Function or
(e.is_Pow and e.exp.is_Number and e.exp < 1))
def _extract(self, exprs, context, n):
mapper = Uxmapper()
for prefix, clusters in context.items():
if not prefix:
continue
exclude = set().union(*[c.scope.writes for c in clusters])
exclude.add(prefix[-1].dim)
for e in exprs:
for i in search(e, self._rule, 'all', 'bfs_first_hit'):
if {a.function for a in i.free_symbols} & exclude:
continue
mapper.add(i, self._make_symbol)
return mapper
def _in_writeto(self, dim, cluster):
return PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if all(i.function.is_Symbol for i in e.free_symbols):
# E.g., `dt**(-2)`
mincost = self._opt_mincost['scalar']
else:
mincost = self._opt_mincost['tensor']
return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):
mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):
mode = 'inv-compound'
def _extract(self, exprs, context, n):
extracted = super()._extract(exprs, context, n).extracted
rule = lambda e: any(a in extracted for a in e.args)
mapper = Uxmapper()
for e in exprs:
for i in search(e, rule, 'all', 'dfs'):
if not i.is_commutative:
continue
key = lambda a: a in extracted
terms, others = split(i.args, key)
mapper.add(i, self._make_symbol, terms)
return mapper
class CireSOPS(Cire):
optname = 'sops'
mode = 'sops'
def _nrepeats(self, cluster):
# The `nrepeats` is calculated such that we analyze all potential derivatives
# in `cluster`
return potential_max_deriv_order(cluster.exprs)
def _extract(self, exprs, context, n):
# Forbid CIRE involving Dimension-independent dependencies, e.g.:
# r0 = ...
# u[x, y] = ... r0*a[x, y] ...
# NOTE: if one uses the DSL in a conventional way and sticks to the default
# compilation pipelines where CSE always happens after CIRE, then `exclude`
# will always be empty
exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}
mapper = Uxmapper()
for e in exprs:
for i in search_potential_deriv(e, n):
if i.free_symbols & exclude:
continue
key = lambda a: a.is_Add
terms, others = split(i.args, key)
if self._opt_maxalias:
# Treat `e` as an FD expression and pull out the derivative
# coefficient from `i`
# Note: typically derivative coefficients are numbers, but
# sometimes they could be provided in symbolic form through an
# arbitrary Function. In the latter case, we rely on the
# heuristic that such Function's basically never span the whole
# grid, but rather a single Grid dimension (e.g., `c[z, n]` for a
# stencil of diameter `n` along `z`)
if e.grid is not None and terms:
key = partial(maybe_coeff_key, e.grid)
others, more_terms = split(others, key)
terms += more_terms
mapper.add(i, self._make_symbol, terms)
return mapper
def _in_writeto(self, dim, cluster):
return self._opt_maxpar and PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if naliases <= 1:
return 0
else:
return estimate_cost(e, True)*naliases // self._opt_mincost
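# Registry of the available CIRE transformers, keyed by mode name; `_cire`
# instantiates one transformer per mode in the requested search space.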
modes = {
CireInvariantsBasic.mode: CireInvariantsBasic,
CireInvariantsCompound.mode: CireInvariantsCompound,
CireSOPS.mode: CireSOPS
}
def collect(extracted, ispace, min_storage):
"""
Find groups of aliasing expressions.
We shall introduce the following (loose) terminology:
* A ``terminal`` is the leaf of a mathematical operation. Terminals
can be numbers (n), literals (l), or Indexeds (I).
* ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
``R(I) = J``, where ``J`` has the same base as ``I`` but with all
offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
* A ``relaxed expression`` is an expression in which all of the
terminals are relaxed.
Now we define the concept of aliasing. We say that an expression A
aliases an expression B if:
* ``R(A) == R(B)``
* all pairwise Indexeds in A and B access memory locations at a
fixed constant distance along each Dimension.
For example, consider the following expressions:
* a[i+1] + b[i+1]
* a[i+1] + b[j+1]
* a[i] + c[i]
* a[i+2] - b[i+2]
* a[i+2] + b[i]
* a[i-1] + b[i-1]
Out of the expressions above, the following alias to `a[i] + b[i]`:
* a[i+1] + b[i+1] : same operands and operations, distance along i: 1
* a[i-1] + b[i-1] : same operands and operations, distance along i: -1
Whereas the following do not:
* a[i+1] + b[j+1] : because at least one index differs
* a[i] + c[i] : because at least one of the operands differs
* a[i+2] - b[i+2] : because at least one operation differs
* a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)
"""
# Find the potential aliases
found = []
for expr in extracted:
assert not expr.is_Equality
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for e, ai in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, e - ai))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if not indexeds or len(bases) == len(indexeds):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
# Create groups of aliasing expressions
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
# Is the arithmetic structure of `c` and `u` equivalent ?
if not compare_ops(c.expr, u.expr):
continue
# Is `c` translated w.r.t. `u` ?
if not c.translated(u):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
# For each Dimension, determine the Minimum Intervals (MI) spanning
# all of the Groups diameters
# Example: x's largest_diameter=2 => [x[-2,0], x[-1,1], x[0,2]]
# Note: Groups that cannot evaluate their diameter are dropped
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}
# For each Group, find a rotation that is compatible with a given MI
mapper = {}
for d, intervals in intervalss.items():
# Not all groups may access all dimensions
# Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
impacted = [g for g in groups if d in g.dimensions]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(distance is not None for distance in found.values()):
# `interval` is OK !
mapper[interval] = found
break
if len(mapper) == len(intervalss):
break
# Try again with fewer groups
# Heuristic: first try retaining the larger ones
smallest = len(min(groups, key=len))
fallback = groups
groups, remainder = split(groups, lambda g: len(g) > smallest)
if groups:
queue.append(remainder)
elif len(remainder) > 1:
# No luck with the heuristic, e.g. there are two groups
# and both have same `len`
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])
# Create the basis alias
offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
for v in c.offsets]
subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
for i, b, v in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
# All aliased expressions
aliaseds = [extracted[i.expr] for i in g]
# Distance of each aliased expression from the basis alias
distances = []
for i in g:
distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases
def choose(aliases, exprs, mapper, selector):
"""
Analyze the detected aliases and, after applying a cost model to rule out
the aliases with a bad flops/memory trade-off, inject them into the original
expressions.
"""
tot = 0
retained = AliasMapper()
# Pass 1: a set of aliasing expressions is retained only if its cost
# exceeds the mode's threshold
candidates = OrderedDict()
aliaseds = []
others = []
for e, v in aliases.items():
score = selector(e, len(v.aliaseds))
if score > 0:
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
    # Do not waste time if unnecessary
if not candidates:
return exprs, retained, tot
# Project the candidate aliases into exprs to determine what the new
# working set would be
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
templated = [uxreplace(e, mapper) for e in exprs]
# Pass 2: a set of aliasing expressions is retained only if the tradeoff
# between operation count reduction and working set increase is favorable
owset = wset(others + templated)
for e, v in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
if score > 1 or \
score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset):
retained[e] = v
tot += score
    # Do not waste time if unnecessary
if not retained:
return exprs, retained, tot
# Substitute the chosen aliasing sub-expressions
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
exprs = [uxreplace(e, mapper) for e in exprs]
return exprs, retained, tot
def lower_aliases(cluster, aliases, in_writeto, maxpar):
"""
Create a Schedule from an AliasMapper.
"""
dmapper = {}
processed = []
for alias, v in aliases.items():
imapper = {**{i.dim: i for i in v.intervals},
**{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
# E.g., `x0_blk0` or (`a[y_m+1]` => `y not in imapper`)
intervals.append(i)
continue
assert i.stamp >= interval.stamp
if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
# The alias doesn't require a temporary Dimension along i.dim
intervals.append(i)
continue
assert not i.dim.is_NonlinearDerived
# `i.dim` is necessarily part of the write-to region, so
# we have to adjust the Interval's stamp. For example, consider
# `i=x[0,0]<1>` and `interval=x[-4,4]<0>`; here we need to
# use `<1>` as stamp, which is what appears in `cluster`
interval = interval.lift(i.stamp)
# We further bump the interval stamp if we were requested to trade
# fusion for more collapse-parallelism
interval = interval.lift(interval.stamp + int(maxpar))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
# Suitable IncrDimensions must be used to avoid OOB accesses.
# E.g., r[xs][ys][z] => both `xs` and `ys` must be initialized such
# that all accesses are within bounds. This requires traversing the
# hierarchy of IncrDimensions to set `xs` (`ys`) in a way that
# consecutive blocks access consecutive regions in `r` (e.g.,
# `xs=x0_blk1-x0_blk0` with `blocklevels=2`; `xs=0` with
# `blocklevels=1`, that is it degenerates in this case)
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
# An IncrDimension in between IncrDimensions
m = i.dim.symbolic_min - i.dim.parent.symbolic_min
else:
m = 0
d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
# Given the iteration `interval`, lower distances to indices
for distance, indices in zip(v.distances, indicess):
indices.append(d - interval.lower + distance[interval.dim])
# The alias write-to space
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
# The alias iteration space
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
# The [ScheduledAliases] must be ordered so as to reuse as many of the
# `cluster`'s IterationIntervals as possible in order to honor the
# write-to region. Another fundamental reason for ordering is to ensure
# deterministic code generation
processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))
return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
"""
Rewrite the schedule for performance optimization.
"""
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule
def _optimize_schedule_rotations(schedule, sregistry):
"""
Transform the schedule such that the tensor temporaries "rotate" along
the outermost Dimension. This trades a parallel Dimension for a smaller
working set size.
"""
# The rotations Dimension is the outermost
ridx = 0
rmapper = defaultdict(list)
processed = []
for k, group in groupby(schedule, key=lambda i: i.writeto):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
# Can't do anything if `d` isn't an IncrDimension over a block
processed.extend(g)
continue
n = candidate.min_size
assert n > 0
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
symbolic_size=n)
dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
mapper = OrderedDict()
for i in g:
# Update `indicess` to use `xs0`, `xs1`, ...
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix='%sr' % d.name)
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
for md, indices in zip(mds, i.indicess)]
# Update `writeto` by switching `d` to `dsi`
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
# Transform `alias` by adding `i`
alias = i.alias.xreplace({d: d + cd})
# Extend `ispace` to iterate over rotations
d1 = writeto[ridx+1].dim # Note: we're by construction in-bounds here
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: mds + [ii]})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
# Update the rotations mapper
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
"""
Round up the innermost IterationInterval of the tensor temporaries IterationSpace
to a multiple of the SIMD vector length. This is not always possible though (it
depends on how much halo is safely accessible in all read Functions).
"""
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[-1]
if ROUNDABLE in cluster.properties[it.dim]:
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
"""
Turn a Schedule into a sequence of Clusters.
"""
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
# Typical case -- the user does *not* "see" the CIRE-created temporaries
make = Array
clusters = []
subs = {}
for alias, writeto, ispace, aliaseds, indicess in schedule:
# Basic info to create the temporary that will hold the alias
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
# The Dimensions defining the shape of Array
# Note: with SubDimensions, we may have the following situation:
#
# for zi = z_m + zi_ltkn; zi <= z_M - zi_rtkn; ...
# r[zi] = ...
#
# Instead of `r[zi - z_m - zi_ltkn]` we have just `r[zi]`, so we'll need
# as much room as in `zi`'s parent to avoid going OOB
# Aside from ugly generated code, the reason we do not rather shift the
# indices is that it prevents future passes to transform the loop bounds
# (e.g., MPI's comp/comm overlap does that)
dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]
# The halo must be set according to the size of writeto space
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
# The indices used to write into the Array
indices = []
for i in writeto:
try:
# E.g., `xs`
sub_iterators = writeto.sub_iterators[i.dim]
assert len(sub_iterators) == 1
indices.append(sub_iterators[0])
except KeyError:
# E.g., `z` -- a non-shifted Dimension
indices.append(i.dim - i.lower)
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = lambda idx: obj[idx]
else:
# Degenerate case: scalar expression
assert writeto.size == 0
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = lambda idx: obj
# Create the substitution rules for the aliasing expressions
subs.update({aliased: callback(indices)
for aliased, indices in zip(aliaseds, indicess)})
# Construct the `alias` DataSpace
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
# Drop or weaken parallelism if necessary
properties = dict(cluster.properties)
for d, v in cluster.properties.items():
if any(i.is_Modulo for i in ispace.sub_iterators[d]):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif d not in writeto.dimensions:
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
# Finally, build the `alias` Cluster
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
dspace=dspace, properties=properties))
return clusters, subs
def pick_best(variants):
"""
Use the variant score and heuristics to return the variant with the best
trade-off between operation count reduction and working set increase.
"""
best = variants.pop(0)
for i in variants:
best_flop_score, best_ws_score = best.score
if best_flop_score == 0:
best = i
continue
i_flop_score, i_ws_score = i.score
        # The current heuristic is fairly basic: the one with smaller working
# set size increase wins, unless there's a massive reduction in operation
# count in the other one
delta = i_ws_score - best_ws_score
if (delta > 0 and i_flop_score / best_flop_score > 100) or \
(delta == 0 and i_flop_score > best_flop_score) or \
(delta < 0 and best_flop_score / i_flop_score <= 100):
best = i
schedule, exprs, _ = best
return schedule, exprs
def rebuild(cluster, exprs, subs, schedule):
"""
Plug the optimized aliases into the input Cluster. This leads to creating
a new Cluster with suitable IterationSpace and DataSpace.
"""
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace)
# Utilities
class Candidate(object):
def __init__(self, expr, ispace, indexeds, bases, offsets):
self.expr = expr
self.shifts = ispace.intervals
self.indexeds = indexeds
self.bases = bases
self.offsets = offsets
def __repr__(self):
return "Candidate(expr=%s)" % self.expr
def translated(self, other):
"""
True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
"""
if len(self.Toffsets) != len(other.Toffsets):
return False
if len(self.bases) != len(other.bases):
return False
# Check the bases
if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
return False
# Check the offsets
for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
if d0 is not d1:
return False
distance = set(o0 - o1)
if len(distance) != 1:
return False
return True
@cached_property
def Toffsets(self):
return LabeledVector.transpose(*self.offsets)
@cached_property
def dimensions(self):
return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):
"""
A collection of aliasing expressions.
"""
def __repr__(self):
return "Group(%s)" % ", ".join([str(i) for i in self])
def find_rotation_distance(self, d, interval):
"""
The distance from the Group pivot of a rotation along Dimension ``d`` that
can safely iterate over the ``interval``.
"""
assert d is interval.dim
for rotation, distance in self._pivot_legal_rotations[d]:
# Does `rotation` cover the `interval` ?
if rotation.union(interval) != rotation:
continue
# Infer the `rotation`'s min_intervals from the pivot's
min_interval = self._pivot_min_intervals[d].translate(-distance)
# Does the `interval` actually cover the `rotation`'s `min_interval`?
if interval.union(min_interval) == interval:
return distance
return None
@cached_property
def Toffsets(self):
return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]
@cached_property
def diameter(self):
"""
The size of the iteration space required to evaluate all aliasing expressions
in this Group, along each Dimension.
"""
ret = defaultdict(int)
for i in self.Toffsets:
for d, v in i:
try:
distance = int(max(v) - min(v))
except TypeError:
# An entry in `v` has symbolic components, e.g. `x_m + 2`
if len(set(v)) == 1:
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret
@property
def pivot(self):
"""
A deterministically chosen Candidate for this Group.
"""
return self[0]
@property
def dimensions(self):
return self.pivot.dimensions
@property
def dimensions_translated(self):
return frozenset(d for d, v in self.diameter.items() if v > 0)
@cached_property
def _pivot_legal_rotations(self):
"""
All legal rotations along each Dimension for the Group pivot.
"""
ret = {}
for d, (maxd, mini) in self._pivot_legal_shifts.items():
# Rotation size = mini (min-increment) - maxd (max-decrement)
v = mini - maxd
# Build the table of all possible rotations
m = make_rotations_table(d, v)
distances = []
for rotation in m:
# Distance of the rotation `i` from `c`
distance = maxd - rotation.lower
assert distance == mini - rotation.upper
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret
@cached_property
def _pivot_min_intervals(self):
"""
The minimum Interval along each Dimension such that by evaluating the
pivot, all Candidates are evaluated too.
"""
c = self.pivot
ret = defaultdict(lambda: [np.inf, -np.inf])
for i in self:
distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
for d, v in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}
return ret
@cached_property
def _pivot_legal_shifts(self):
"""
The max decrement and min increment along each Dimension such that the
Group pivot does not go OOB.
"""
c = self.pivot
ret = defaultdict(lambda: (-np.inf, np.inf))
for i, ofs in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
# `f`'s cumulative halo size along `l`
hsize = sum(f._size_halo[l])
# Any `ofs`'s shift due to non-[0,0] iteration space
lower, upper = c.shifts[l].offsets
try:
# Assume `ofs[d]` is a number (typical case)
maxd = min(0, max(ret[l][0], -ofs[l] - lower))
mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))
ret[l] = (maxd, mini)
except TypeError:
# E.g., `ofs[d] = x_m - x + 5`
ret[l] = (0, 0)
return ret
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)
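# A point in the CIRE search space: a Schedule of aliases, the rewritten
# expressions, and a (flop-reduction, working-set) score consumed by `pick_best`.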
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):
def __new__(cls, *items, dmapper=None, rmapper=None):
obj = super(Schedule, cls).__new__(cls, items)
obj.dmapper = dmapper or {}
obj.rmapper = rmapper or {}
return obj
class AliasMapper(OrderedDict):
def add(self, alias, intervals, aliaseds, distances):
assert len(aliaseds) == len(distances)
self[alias] = AliasedGroup(intervals, aliaseds, distances)
def update(self, aliases):
for k, v in aliases.items():
try:
v0 = self[k]
if v0.intervals != v.intervals:
raise ValueError
v0.aliaseds.extend(v.aliaseds)
v0.distances.extend(v.distances)
except KeyError:
self[k] = v
@property
def aliaseds(self):
return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
"""
All possible rotations of `range(v+1)`.
"""
m = np.array([[j-i if j > i else 0 for j in range(v+1)] for i in range(v+1)])
m = (m - m.T)[::-1, :]
# Shift the table so that the middle rotation is at the top
m = np.roll(m, int(-np.floor(v/2)), axis=0)
# Turn into a more compact representation as a list of Intervals
m = [Interval(d, min(i), max(i)) for i in m]
return m
def cit(ispace0, ispace1):
"""
The Common IterationIntervals of two IterationSpaces.
"""
found = []
for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
if it0 == it1:
found.append(it0)
else:
break
return tuple(found)
def maybe_coeff_key(grid, expr):
"""
True if `expr` could be the coefficient of an FD derivative, False otherwise.
"""
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(not set(grid.dimensions) <= set(i.function.dimensions) for i in indexeds)
def wset(exprs):
"""
Extract the working set out of a set of equations.
"""
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)])
if i.function.is_AbstractFunction}
def potential_max_deriv_order(exprs):
"""
The maximum FD derivative order in a list of expressions.
"""
# NOTE: e might propagate the Derivative(...) information down from the
# symbolic language, but users may do crazy things and write their own custom
# expansions "by hand" (i.e., not resorting to Derivative(...)), hence instead
# of looking for Derivative(...) we use the following heuristic:
# add(mul, mul, ...) -> stems from first order derivative
# add(mul(add(mul, mul, ...), ...), ...) -> stems from second order derivative
# ...
nadds = lambda e: (int(e.is_Add) +
max([nadds(a) for a in e.args], default=0) if not q_leaf(e) else 0)
return max([nadds(e) for e in exprs], default=0)
def search_potential_deriv(expr, n, c=0):
"""
Retrieve the expressions at depth `n` that potentially stem from FD derivatives.
"""
assert n >= c >= 0
if q_leaf(expr) or expr.is_Pow:
return []
elif expr.is_Mul:
if c == n:
return [expr]
else:
return flatten([search_potential_deriv(a, n, c+1) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args])
| 35.173189 | 90 | 0.581848 | [
"MIT"
] | ccuetom/devito | devito/passes/clusters/aliases.py | 42,243 | Python |
#!/usr/bin/env python
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
"""
Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict
"""
try:
a.update(b)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
def get_scripts_to_execute(system, workingdir, **scriptparams):
"""
Returns an array of hashtables. Each hashtable has two keys: 'ScriptUrl' and 'Parameters'.
'ScriptSource' is the path to the script to be executed. Only supports http/s sources currently.
'Parameters' is a hashtable of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : dict
"""
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized?'.format(system))
return scriptstoexecute
def create_working_dir(basedir, dirprefix):
"""
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory will have a random 5 character string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
"""
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
def get_system_params(system):
"""
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
"""
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
#TODO: Add and test the Windows parameters/functionality
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\system-is-ready'.format(systemdrive)
        a['restart'] = '{0}\system32\shutdown.exe /r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized?'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
def cleanup(workingdir):
"""
Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool
"""
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot = 'false', **kwargs):
"""
Master script that calls content scripts to be deployed when provisioning systems
"""
# NOTE: Using __file__ may freeze if trying to build an executable, e.g. via py2exe.
# NOTE: Using __file__ does not work if running from IDLE/interpreter.
# NOTE: __file__ may return relative path as opposed to an absolute path, so include os.path.abspath.
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
# Check special parameter types
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
#Loop through each 'script' in scriptstoexecute
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
#Download each script, script['ScriptSource']
download_file(url, fullfilepath, sourceiss3bucket)
#Execute each script, passing it the parameters in script['Parameters']
#TODO: figure out if there's a better way to call and execute the script
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].iteritems())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
        if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
[key, value] = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter that does not have = in it.'
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
| 41.507205 | 133 | 0.545789 | [
"Apache-2.0"
] | plus3it/SystemPrep | MasterScripts/systemprep-linuxmaster.py | 14,403 | Python |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
########################################################################
# FirePeriodTable
#
# Type: table
# Edit Areas: solicited from user
# Weather Elements: You must have these Weather elements defined in
# your server: Sky, LAL, RelHum, MaxT, MinT, FreeWind,
# Haines, TransWind, MixHgt(ft AGL)
# To Run:
# Set GFE Time Range
# Products-->Generate Products
# Choose Edit Areas
# Select OK
#
########################################################################
## EXAMPLE OUTPUT (Scarce Data)
## Fire Period Table for Feb 29 00 17:00:00 GMT - Mar 01 00 11:00:00 GMT.
## Edit Area Sky (%) LAL RelHum (%) MaxT MinT FreeWind(mph) Haines TransWind(mph) MixHgt(ft AGL)
## COAdams 36-23 46 26
## COArapahoe 34-24 46 26
## COBoulder 31-52 34 18
## COClearCreek 16-57 26 12
## CODenver 37-40 43 25
## CODouglas 24-47 40 21
## COElbert 31-22 46 25
########################################################################
Definition = {
"type": "table",
"displayName": "TEST_Fire Period Table", # for Product Generation Menu
# Output file for product results
"outputFile": "./FirePeriodTable.txt", # default output file
"constantVariable": "TimePeriod",
"rowVariable": "EditArea",
"columnVariable": "WeatherElement",
"beginningText": "Fire Period Table for %TimePeriod. \n\n",
"endingText": "",
# Edit Areas
"defaultEditAreas" : [("area1","Area 1"),("area2","Area 2")],
"runTimeEditAreas": "yes",
"areaType" : "Edit Area", # E.g. City, County, Basin, etc.
# Time Ranges
"defaultRanges": ["Today"],
"runTimeRanges" : "no", # if yes, ask user at run time
"elementList": [
("Sky", "Sky (%)",
"minMax",
"range2Value",
"Scalar", 1, None),
("LAL","LAL",
"minMax",
"range2Value",
"Scalar",1,None),
("MaxT","MaxT",
"avg",
"singleValue",
"Scalar", 1, None),
("MinT","MinT",
"avg",
"singleValue",
"Scalar", 1, None),
("FreeWind","FreeWind(mph)",
"vectorRange",
"range2Value",
"Vector", 1, "ktToMph"),
("Haines","Haines",
"minMax",
"range2Value",
"Scalar",1,None),
("TransWind","TransWind(mph)",
"vectorRange",
"range2Value",
"Vector", 1, "ktToMph"),
("MixHgt", "MixHgt(ft AGL)",
"minMax",
"range2Value",
"Scalar",10,None),
],
}
| 34.720721 | 112 | 0.463415 | [
"Apache-2.0"
] | drjoeycadieux/awips2 | cave/com.raytheon.viz.gfe/python/testFormatters/FirePeriodTable.py | 3,854 | Python |
# Python stubs generated by omniidl from /tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
#
# Start of module "CosObjectIdentity"
#
__name__ = "CosObjectIdentity"
_0_CosObjectIdentity = omniORB.openModule("CosObjectIdentity", r"/tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl")
_0_CosObjectIdentity__POA = omniORB.openModule("CosObjectIdentity__POA", r"/tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl")
# typedef ... ObjectIdentifier
class ObjectIdentifier:
_NP_RepositoryId = "IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosObjectIdentity.ObjectIdentifier = ObjectIdentifier
_0_CosObjectIdentity._d_ObjectIdentifier = omniORB.tcInternal.tv_ulong
_0_CosObjectIdentity._ad_ObjectIdentifier = (omniORB.tcInternal.tv_alias, ObjectIdentifier._NP_RepositoryId, "ObjectIdentifier", omniORB.tcInternal.tv_ulong)
_0_CosObjectIdentity._tc_ObjectIdentifier = omniORB.tcInternal.createTypeCode(_0_CosObjectIdentity._ad_ObjectIdentifier)
omniORB.registerType(ObjectIdentifier._NP_RepositoryId, _0_CosObjectIdentity._ad_ObjectIdentifier, _0_CosObjectIdentity._tc_ObjectIdentifier)
del ObjectIdentifier
# interface IdentifiableObject
_0_CosObjectIdentity._d_IdentifiableObject = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0", "IdentifiableObject")
omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0"] = _0_CosObjectIdentity._d_IdentifiableObject
_0_CosObjectIdentity.IdentifiableObject = omniORB.newEmptyClass()
class IdentifiableObject :
_NP_RepositoryId = _0_CosObjectIdentity._d_IdentifiableObject[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosObjectIdentity.IdentifiableObject = IdentifiableObject
_0_CosObjectIdentity._tc_IdentifiableObject = omniORB.tcInternal.createTypeCode(_0_CosObjectIdentity._d_IdentifiableObject)
omniORB.registerType(IdentifiableObject._NP_RepositoryId, _0_CosObjectIdentity._d_IdentifiableObject, _0_CosObjectIdentity._tc_IdentifiableObject)
# IdentifiableObject operations and attributes
IdentifiableObject._d__get_constant_random_id = ((),(omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"],),None)
IdentifiableObject._d_is_identical = ((omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0"], ), (omniORB.tcInternal.tv_boolean, ), None)
# IdentifiableObject object reference
class _objref_IdentifiableObject (CORBA.Object):
_NP_RepositoryId = IdentifiableObject._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def _get_constant_random_id(self, *args):
return self._obj.invoke("_get_constant_random_id", _0_CosObjectIdentity.IdentifiableObject._d__get_constant_random_id, args)
constant_random_id = property(_get_constant_random_id)
def is_identical(self, *args):
return self._obj.invoke("is_identical", _0_CosObjectIdentity.IdentifiableObject._d_is_identical, args)
omniORB.registerObjref(IdentifiableObject._NP_RepositoryId, _objref_IdentifiableObject)
_0_CosObjectIdentity._objref_IdentifiableObject = _objref_IdentifiableObject
del IdentifiableObject, _objref_IdentifiableObject
# IdentifiableObject skeleton
__name__ = "CosObjectIdentity__POA"
class IdentifiableObject (PortableServer.Servant):
_NP_RepositoryId = _0_CosObjectIdentity.IdentifiableObject._NP_RepositoryId
_omni_op_d = {"_get_constant_random_id": _0_CosObjectIdentity.IdentifiableObject._d__get_constant_random_id, "is_identical": _0_CosObjectIdentity.IdentifiableObject._d_is_identical}
IdentifiableObject._omni_skeleton = IdentifiableObject
_0_CosObjectIdentity__POA.IdentifiableObject = IdentifiableObject
omniORB.registerSkeleton(IdentifiableObject._NP_RepositoryId, IdentifiableObject)
del IdentifiableObject
__name__ = "CosObjectIdentity"
#
# End of module "CosObjectIdentity"
#
__name__ = "CosObjectIdentity_idl"
_exported_modules = ( "CosObjectIdentity", )
# The end.
| 43.227723 | 185 | 0.830508 | [
"MIT"
] | akaszynski/ansys_corba | ansys_corba/omniORB/COS/CosObjectIdentity_idl.py | 4,366 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.zerohourconfig import INITIAL_BLOCK_REWARD
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super().setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
        # All nodes should start with 25 * INITIAL_BLOCK_REWARD:
starting_balance = 25*INITIAL_BLOCK_REWARD
# All nodes should be out of IBD.
# If the nodes are not all out of IBD, that can interfere with
# blockchain sync later in the test when nodes are connected, due to
# timing issues.
for n in self.nodes:
assert n.getblockchaininfo()["initialblockdownload"] == False
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
spend_from_foo = starting_balance - INITIAL_BLOCK_REWARD*5
spend_from_bar = INITIAL_BLOCK_REWARD*5 - 100
spend_from_doublespend = spend_from_foo + spend_from_bar - 8
# Assign coins to foo and bar addresses:
node0_address_foo = self.nodes[0].getnewaddress()
fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, spend_from_foo)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress()
fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, spend_from_bar)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(),
starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
        # First: use raw transaction API to send spend_from_doublespend to
        # node1_address, but don't broadcast:
doublespend_fee = Decimal('-.02')
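        # Note: fees (including doublespend_fee) appear as negative amounts in
        # the balance arithmetic below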
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, spend_from_foo)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, spend_from_bar)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = spend_from_doublespend
outputs[change_address] = spend_from_foo + spend_from_bar - spend_from_doublespend + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two more spends to node1_address (4/5 and 2/5 of a block reward)
txid1 = self.nodes[0].sendtoaddress(node1_address, (INITIAL_BLOCK_REWARD/5) * 4)
txid2 = self.nodes[0].sendtoaddress(node1_address, (INITIAL_BLOCK_REWARD/5) * 2)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus one block subsidy if
        # a block was mined, minus the two spends, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block:
expected += INITIAL_BLOCK_REWARD
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance(), starting_balance - (tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus two block
        # subsidies for two more matured blocks, minus the double-spend amount,
        # plus fees (which are negative):
expected = starting_balance + 2*INITIAL_BLOCK_REWARD - spend_from_doublespend + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance(), starting_balance
- spend_from_doublespend
+ 2*INITIAL_BLOCK_REWARD
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
if __name__ == '__main__':
TxnMallTest().main()
| 46.178808 | 145 | 0.641761 | [
"MIT"
] | zerohourcash/zerohourcash | test/functional/wallet_txn_doublespend.py | 6,973 | Python |
git clone
git add <file-or-directory>
git commit -m '<commit message>'
git push origin master   # origin = default remote name, master = branch name
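# Illustrative end-to-end workflow (hypothetical repository URL and file names):
#   git clone https://github.com/example/project.git
#   cd project
#   git add README.md
#   git commit -m 'Add project README'
#   git push origin master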
| 16.4 | 35 | 0.719512 | [
"Apache-2.0"
] | demengliu/network | test.py | 116 | Python |
import time
try:
import wandb
wandb_log=True
except ImportError:
wandb_log=False
import numpy as np
from advbench.lib.plotting import plot_perturbed_wandb
from einops import rearrange
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, avg_mom=0.5):
self.avg_mom = avg_mom
self.reset()
self.print = True
def reset(self):
self.val = 0
self.avg = 0 # running average of whole epoch
self.smooth_avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.smooth_avg = val if self.count == 0 else self.avg*self.avg_mom + val*(1-self.avg_mom)
self.avg = self.sum / self.count
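# Example usage of AverageMeter (illustrative variable names): track a running loss.
#   loss_meter = AverageMeter()
#   loss_meter.update(batch_loss, n=batch_size)
#   loss_meter.avg         # running mean over everything seen so far
#   loss_meter.smooth_avg  # exponential moving average with momentum avg_mom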
class TimeMeter:
def __init__(self):
self.batch_time = AverageMeter()
self.data_time = AverageMeter()
self.start = time.time()
def batch_start(self):
self.data_time.update(time.time() - self.start)
def batch_end(self):
self.batch_time.update(time.time() - self.start)
self.start = time.time()
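# Example usage of TimeMeter (illustrative): call batch_start() right after the data
# loader yields a batch (data_time then measures loading time since the previous
# batch_end), and batch_end() after processing (batch_time measures the full iteration
# and the clock is reset).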
if wandb_log:
class WBHistogramMeter:
def __init__(self, name):
self.print = False
self.name = name
def reset(self):
pass
def update(self, val):
wandb.log({self.name: wandb.Histogram(val)})
class WBDeltaMeter(WBHistogramMeter):
def __init__(self, names = [], dims = 0, max_points = 100):
self.max_points = max_points
self.print = False
self.dims = dims
if isinstance(names, str):
names = [f"{names} {i}" for i in range(dims)]
self.meters = [WBHistogramMeter(name) for name in names]
def reset(self):
pass
def update(self, vals):
if self.dims>3:
pass
elif len(vals.shape)==3:
for i in range(len(self.meters)):
self.meters[i].update(vals[:,i,:self.max_points].flatten())
else:
for i in range(len(vals[0])):
self.meters[i].update(vals[:,i])
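    # Note: WBDeltaMeter.update expects perturbations of shape (batch, dims, n_points)
    # for the 3-D case (only the first max_points are logged) or (batch, dims)
    # otherwise; logging is skipped entirely when dims > 3.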
class WBLinePlotMeter():
def __init__(self, name):
self.print = False
self.name = name
def reset(self):
pass
def update(self, grid, vals):
plot_perturbed_wandb(grid, vals, name=self.name)
class WBDualMeter(WBHistogramMeter):
def __init__(self, grid, translations, names = "dual vs angle", locs = [(0, 0), (-1,-1)], log_every=500):
self.print = False
self.locs = []
tx, ty = translations
for loc in locs:
self.locs.append((grid[:,1]==tx[loc[0]])&(grid[:,2]==ty[loc[1]]))
if isinstance(names, str):
names = [f"{names} {grid[i[0], 1].detach().cpu().item(), grid[i[0], 2].detach().cpu().item()}" for i in locs]
self.grid = grid
self.meters = [WBLinePlotMeter(name) for name in names]
self.log_every = log_every
self.counter = 0
def reset(self):
self.counter=0
def update(self, vals):
if self.counter%self.log_every == 0:
print("*"*10)
print("log")
for i in range(len(self.locs)):
self.meters[i].update(self.grid[self.locs[i], 0].detach().cpu().numpy(), vals[self.locs[i]].detach().cpu().numpy())
self.counter+=1
else:
class WBHistogramMeter:
def __init__(self, name):
self.print = False
def reset(self):
pass
def update(self, val):
pass
class WBDeltaMeter(WBHistogramMeter):
def __init__(self,names = [], dims = 0):
self.print = False
| 29.923664 | 135 | 0.53801 | [
"MIT"
] | constrainedlearning/advbench | advbench/lib/meters.py | 3,920 | Python |
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import Any
from uuid import UUID
from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
from .exceptions import ConfigError, type_display
NoneType = type(None)
def display_as_type(v):
return type_display(type(v))
def not_none_validator(v):
if v is None:
        raise TypeError('None is not an allowed value')
return v
def str_validator(v) -> str:
if isinstance(v, (str, NoneType)):
return v
elif isinstance(v, (bytes, bytearray)):
return v.decode()
elif isinstance(v, (float, int, Decimal)):
# is there anything else we want to add here? If you think so, create an issue.
return str(v)
else:
raise TypeError(f'str or byte type expected not {display_as_type(v)}')
def bytes_validator(v) -> bytes:
if isinstance(v, (bytes, NoneType)):
return v
return str_validator(v).encode()
BOOL_STRINGS = {
'1',
'TRUE',
'ON',
'YES',
}
def bool_validator(v) -> bool:
if isinstance(v, bool):
return v
if isinstance(v, bytes):
v = v.decode()
if isinstance(v, str):
return v.upper() in BOOL_STRINGS
return bool(v)
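# Examples for bool_validator above: bool_validator('yes') -> True,
# bool_validator('off') -> False, bool_validator(b'1') -> True, bool_validator(0) -> False.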
def number_size_validator(v, config, **kwargs):
if config.min_number_size <= v <= config.max_number_size:
return v
raise ValueError(f'size not in range {config.min_number_size} to {config.max_number_size}')
def anystr_length_validator(v, config, **kwargs):
if v is None or config.min_anystr_length <= len(v) <= config.max_anystr_length:
return v
raise ValueError(f'length {len(v)} not in range {config.min_anystr_length} to {config.max_anystr_length}')
def ordered_dict_validator(v) -> OrderedDict:
if isinstance(v, OrderedDict):
return v
return OrderedDict(v)
def dict_validator(v) -> dict:
if isinstance(v, dict):
return v
try:
return dict(v)
except TypeError as e:
raise TypeError(f'value is not a valid dict, got {display_as_type(v)}') from e
def list_validator(v) -> list:
if isinstance(v, list):
return v
return list(v)
def tuple_validator(v) -> tuple:
if isinstance(v, tuple):
return v
return tuple(v)
def set_validator(v) -> set:
if isinstance(v, set):
return v
return set(v)
def enum_validator(v, field, config, **kwargs) -> Enum:
enum_v = field.type_(v)
return enum_v.value if config.use_enum_values else enum_v
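# Example for enum_validator above: the raw value is coerced with field.type_(v);
# with Config.use_enum_values = True the enum member's .value is stored instead of
# the member itself.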
def uuid_validator(v) -> UUID:
if isinstance(v, UUID):
return v
elif isinstance(v, str):
return UUID(v)
elif isinstance(v, (bytes, bytearray)):
return UUID(v.decode())
else:
raise ValueError(f'str, byte or native UUID type expected not {type(v)}')
# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same
_VALIDATORS = [
(Enum, [enum_validator]),
(str, [not_none_validator, str_validator, anystr_length_validator]),
(bytes, [not_none_validator, bytes_validator, anystr_length_validator]),
(bool, [bool_validator]),
(int, [int, number_size_validator]),
(float, [float, number_size_validator]),
(Path, [Path]),
(datetime, [parse_datetime]),
(date, [parse_date]),
(time, [parse_time]),
(timedelta, [parse_duration]),
(OrderedDict, [ordered_dict_validator]),
(dict, [dict_validator]),
(list, [list_validator]),
(tuple, [tuple_validator]),
(set, [set_validator]),
(UUID, [not_none_validator, uuid_validator]),
]
def find_validators(type_):
if type_ is Any:
return []
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
return validators
except TypeError as e:
raise TypeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
raise ConfigError(f'no validator found for {type_}')
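# Example: find_validators(int) returns [int, number_size_validator], which are applied
# in order when populating an int field; find_validators(Any) returns [] (no coercion),
# and an unsupported type raises ConfigError.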
| 25.937107 | 113 | 0.660281 | [
"MIT"
] | anentropic/pydantic | pydantic/validators.py | 4,124 | Python |